Compare commits

11 Commits

Author SHA1 Message Date
Techno Tim
7b6b24ce4d feat(k3s): Updated to v1.29.0+k3s1 (#421) 2024-01-26 14:49:24 -06:00
Techno Tim
a5728da35e feat(k3s): Updated to v1.28 (#420)
* feat(k3s): Updated to v1.28.5+k3s1
2024-01-26 13:10:21 -06:00
Techno Tim
cda7c92203 feat(k3s): Updated to v1.27 (#294)
* feat(k3s): Updated to v1.27.1+k3s1

* feat(k3s): Updated to v1.27.1+k3s1

* feat(k3s): Updated to v1.27.4+k3s1

* feat(k3s): Updated to v1.27.9+k3s1
2024-01-26 18:54:58 +00:00
Techno Tim
d910b83bf3 fix(molecule): Cleanup all artifacts, side effects, and actions in case nodes are not ephemeral (#427) 2024-01-26 17:16:26 +00:00
Techno Tim
101313f880 feat(dependabot): Added docker docker and github actions (#422) 2024-01-26 16:19:42 +00:00
Techno Tim
12be355867 feat(k3s): Updated to v1.26 (#207)
* feat(k3s): Updated to v1.26.0+k3s2

* feat(k3s): Updated to v1.26.2+k3s1

* feat(k3s): Updated to v1.26.3+k3s1

* feat(k3s): Updated to v1.26.4+k3s1

* feat(k3s): Updated to v1.26.7+k3s1

* feat(k3s): Updated to v1.26.11+k3s2

* feat(k3s): Updated to v1.26.12+k3s1
2024-01-25 22:09:08 +00:00
Gabor A
aa09e3e9df fix: typos (#416)
Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2024-01-25 20:40:56 +00:00
sholdee
511c410451 Add Debian Bookworm support and refactor Pi OS detection (#415)
* Refactor Pi OS detection and add Debian Bookworm support

* Add bullseye back

---------

Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2024-01-25 19:20:02 +00:00
Balázs Hasprai
df9c6f3014 Fix http_proxy service dir in k3s_agent role (#400)
* Fix http_proxy service dir in k3s_agent role

* Fix http_proxy reset: rm conf files before dirs

* Fix http_proxy reset rm order

---------

Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2024-01-25 11:34:46 -06:00
Timothy Stewart
5ae8fd1223 fix(molecule): lower resources for nodes 2024-01-25 09:30:02 -06:00
Techno Tim
e2e9881f0f Fix CI (#389)
did all the things to make it work
2024-01-24 22:26:38 -06:00
24 changed files with 222 additions and 344 deletions

View File

@@ -35,7 +35,7 @@ k3s_version: ""
ansible_user: NA
systemd_dir: ""
container_iface: ""
flannel_iface: ""
apiserver_endpoint: ""

View File

@@ -9,3 +9,18 @@ updates:
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]

.github/workflows/cache.yml (new file, 42 lines)
View File

@@ -0,0 +1,42 @@
---
name: "Cache"
on:
workflow_call:
jobs:
molecule:
name: cache
runs-on: self-hosted
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Cache Vagrant boxes
id: cache-vagrant
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
lookup-only: true #if it exists, we don't need to restore and can skip the next step
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
if: steps.cache-vagrant.outputs.cache-hit != 'true' # only run if false since this is just a cache step
run: |
./.github/download-boxes.sh
vagrant box list

View File

@@ -8,8 +8,11 @@ on:
paths-ignore:
- '**/README.md'
jobs:
pre:
uses: ./.github/workflows/cache.yml
lint:
uses: ./.github/workflows/lint.yml
needs: [pre]
test:
uses: ./.github/workflows/test.yml
needs: [lint]
needs: [pre, lint]
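
Read together with the cache workflow above, this hunk changes the job graph so that both lint and test wait on the shared cache job. A sketch of the resulting jobs block (assuming ci.yml defines no other jobs):

jobs:
  pre:
    uses: ./.github/workflows/cache.yml
  lint:
    uses: ./.github/workflows/lint.yml
    needs: [pre]
  test:
    uses: ./.github/workflows/test.yml
    needs: [pre, lint]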

View File

@@ -5,7 +5,7 @@ on:
jobs:
pre-commit-ci:
name: Pre-Commit
runs-on: ubuntu-latest
runs-on: self-hosted
env:
PYTHON_VERSION: "3.11"
@@ -21,21 +21,11 @@ jobs:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Ansible
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
- name: Restore Ansible cache
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
path: ~/.ansible/collections
key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
restore-keys: |
${{ runner.os }}-ansible-
key: ansible-${{ hashFiles('collections/requirements.yml') }}
- name: Install dependencies
run: |
@@ -47,16 +37,12 @@ jobs:
python3 -m pip install -r requirements.txt
echo "::endgroup::"
echo "::group::Install Ansible role requirements from collections/requirements.yml"
ansible-galaxy install -r collections/requirements.yml
echo "::endgroup::"
- name: Run pre-commit
uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
ensure-pinned-actions:
name: Ensure SHA Pinned Actions
runs-on: ubuntu-latest
runs-on: self-hosted
steps:
- name: Checkout code
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0

View File

@@ -5,7 +5,7 @@ on:
jobs:
molecule:
name: Molecule
runs-on: macos-12
runs-on: self-hosted
strategy:
matrix:
scenario:
@@ -30,35 +30,19 @@ jobs:
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Vagrant boxes
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Restore vagrant Boxes cache
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
path: ~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
fail-on-cache-miss: true
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
@@ -75,10 +59,35 @@ jobs:
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
ANSIBLE_TIMEOUT: 120
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
@@ -86,7 +95,3 @@ jobs:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

View File

@@ -1,5 +1,5 @@
---
k3s_version: v1.25.16+k3s4
k3s_version: v1.29.0+k3s1
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -7,15 +7,8 @@ systemd_dir: /etc/systemd/system
# Set your timezone
system_timezone: "Your/Timezone"
# node interface which will be used for the container network interface (flannel or calico)
container_iface: "eth0"
# set use_calico to true to use tigera operator/calico instead of the default CNI flannel
# install reference: https://docs.tigera.io/calico/latest/getting-started/kubernetes/k3s/multi-node-install#install-calico
use_calico: false
calico_ebpf: false # use eBPF dataplane instead of iptables https://docs.tigera.io/calico/latest/operations/ebpf
calico_cidr: "10.52.0.0/16" # pod cidr pool
calico_tag: "v3.27.0" # calico version tag
# interface which will be used for flannel
flannel_iface: "eth0"
# apiserver_endpoint is virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222"
@@ -27,30 +20,23 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override
# it for each of your hosts, though.
k3s_node_ip: '{{ ansible_facts[container_iface]["ipv4"]["address"] }}'
k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
# these arguments are recommended for servers as well as agents:
extra_args: >-
{{ '--flannel-iface=' + container_iface if not use_calico else '' }}
--flannel-iface={{ flannel_iface }}
--node-ip={{ k3s_node_ip }}
# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
# the contents of the if block is also required if using calico
extra_server_args: >-
{{ extra_args }}
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
{% if use_calico %}
--flannel-backend=none
--disable-network-policy
--cluster-cidr={{ calico_cidr }}
{% endif %}
--tls-san {{ apiserver_endpoint }}
--disable servicelb
--disable traefik
extra_agent_args: >-
{{ extra_args }}
@@ -80,9 +66,9 @@ metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
# containers are significantly more resource efficent compared to full VMs.
# containers are significantly more resource efficient compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have partiularly low powered proxmox nodes where the overhead of
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false
# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
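
For orientation, a hypothetical rendering of the composed server arguments shown earlier in this file's diff, under the sample values (flannel_iface: eth0, apiserver_endpoint: 192.168.30.222), an assumed node eth0 address of 192.168.30.38, and at least one agent node so the master taint applies:

# Hypothetical rendered value, for illustration only:
extra_server_args: >-
  --flannel-iface=eth0
  --node-ip=192.168.30.38
  --node-taint node-role.kubernetes.io/master=true:NoSchedule
  --tls-san 192.168.30.222
  --disable servicelb
  --disable traefik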

View File

@@ -7,7 +7,7 @@ platforms:
- name: control1
box: generic/ubuntu2204
memory: 2048
memory: 1024
cpus: 2
groups:
- k3s_cluster
@@ -22,8 +22,8 @@ platforms:
ssh.password: "vagrant"
- name: control2
box: generic/debian11
memory: 2048
box: generic/debian12
memory: 1024
cpus: 2
groups:
- k3s_cluster
@@ -34,7 +34,7 @@ platforms:
- name: control3
box: generic/rocky9
memory: 2048
memory: 1024
cpus: 2
groups:
- k3s_cluster
@@ -45,7 +45,7 @@ platforms:
- name: node1
box: generic/ubuntu2204
memory: 2048
memory: 1024
cpus: 2
groups:
- k3s_cluster
@@ -61,7 +61,7 @@ platforms:
- name: node2
box: generic/rocky9
memory: 2048
memory: 1024
cpus: 2
groups:
- k3s_cluster
@@ -72,6 +72,8 @@ platforms:
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -82,7 +84,6 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -6,7 +6,7 @@
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
container_iface: eth1
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45

View File

@@ -6,7 +6,7 @@ driver:
platforms:
- name: control1
box: generic/ubuntu2204
memory: 2048
memory: 1024
cpus: 2
groups:
- k3s_cluster
@@ -22,7 +22,7 @@ platforms:
- name: control2
box: generic/ubuntu2204
memory: 2048
memory: 1024
cpus: 2
groups:
- k3s_cluster
@@ -38,7 +38,7 @@ platforms:
- name: node1
box: generic/ubuntu2204
memory: 2048
memory: 1024
cpus: 2
groups:
- k3s_cluster
@@ -53,6 +53,8 @@ platforms:
ssh.password: "vagrant"
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -63,7 +65,6 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -6,7 +6,7 @@
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
container_iface: eth1
flannel_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be
# broadcasted on. Since we have assigned a dedicated private network
@@ -27,13 +27,13 @@
- fdad:bad:ba55::1b:0/112
- 192.168.123.80-192.168.123.90
# k3s_node_ip is by default set to the IPv4 address of container_iface.
# k3s_node_ip is by default set to the IPv4 address of flannel_iface.
# We want IPv6 addresses here of course, so we just specify them
# manually below.
k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}"
- name: Override host variables (2/2)
# Since "extra_args" depends on "k3s_node_ip" and "container_iface" we have
# Since "extra_args" depends on "k3s_node_ip" and "flannel_iface" we have
# to set this AFTER overriding the both of them.
ansible.builtin.set_fact:
# A few extra server args are necessary:

View File

@@ -30,7 +30,7 @@
name: net.ipv6.conf.{{ item }}.accept_dad
value: "0"
with_items:
- "{{ container_iface }}"
- "{{ flannel_iface }}"
- name: Write IPv4 configuration
ansible.builtin.template:

View File

@@ -3,6 +3,6 @@ network:
version: 2
renderer: networkd
ethernets:
{{ container_iface }}:
{{ flannel_iface }}:
addresses:
- {{ node_ipv4 }}/24

View File

@@ -21,6 +21,8 @@ platforms:
ip: 192.168.30.50
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -31,7 +33,6 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -6,7 +6,7 @@
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
container_iface: eth1
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45

View File

@@ -1,10 +1,10 @@
ansible-core>=2.13.5
ansible-core>=2.16.2
jmespath>=1.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
molecule-vagrant>=1.0.0
molecule>=4.0.3
netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0
jsonpatch>=1.33
kubernetes>=29.0.0
molecule-plugins[vagrant]
molecule>=6.0.3
netaddr>=0.10.1
pre-commit>=3.6.0
pre-commit-hooks>=4.5.0
pyyaml>=6.0.1

View File

@@ -4,174 +4,165 @@
#
# pip-compile requirements.in
#
ansible-compat==3.0.1
ansible-compat==4.1.11
# via molecule
ansible-core==2.15.4
ansible-core==2.16.2
# via
# -r requirements.in
# ansible-compat
arrow==1.2.3
# via jinja2-time
attrs==22.1.0
# via jsonschema
binaryornot==0.4.4
# via cookiecutter
cachetools==5.2.0
# molecule
attrs==23.2.0
# via
# jsonschema
# referencing
bracex==2.4
# via wcmatch
cachetools==5.3.2
# via google-auth
certifi==2022.9.24
certifi==2023.11.17
# via
# kubernetes
# requests
cffi==1.15.1
cffi==1.16.0
# via cryptography
cfgv==3.3.1
cfgv==3.4.0
# via pre-commit
chardet==5.0.0
# via binaryornot
charset-normalizer==2.1.1
charset-normalizer==3.3.2
# via requests
click==8.1.3
click==8.1.7
# via
# click-help-colors
# cookiecutter
# molecule
click-help-colors==0.9.1
click-help-colors==0.9.4
# via molecule
commonmark==0.9.1
# via rich
cookiecutter==2.1.1
# via molecule
cryptography==38.0.3
cryptography==41.0.7
# via ansible-core
distlib==0.3.6
distlib==0.3.8
# via virtualenv
distro==1.8.0
# via selinux
enrich==1.2.7
# via molecule
filelock==3.8.0
filelock==3.13.1
# via virtualenv
google-auth==2.14.0
google-auth==2.26.2
# via kubernetes
identify==2.5.8
identify==2.5.33
# via pre-commit
idna==3.4
idna==3.6
# via requests
jinja2==3.1.2
jinja2==3.1.3
# via
# ansible-core
# cookiecutter
# jinja2-time
# molecule
# molecule-vagrant
jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1
# via -r requirements.in
jsonpatch==1.33
# via -r requirements.in
jsonpointer==2.3
jsonpointer==2.4
# via jsonpatch
jsonschema==4.17.0
jsonschema==4.21.1
# via
# ansible-compat
# molecule
kubernetes==25.3.0
jsonschema-specifications==2023.12.1
# via jsonschema
kubernetes==29.0.0
# via -r requirements.in
markupsafe==2.1.1
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.4
# via jinja2
molecule==4.0.4
mdurl==0.1.2
# via markdown-it-py
molecule==6.0.3
# via
# -r requirements.in
# molecule-vagrant
molecule-vagrant==1.0.0
# molecule-plugins
molecule-plugins[vagrant]==23.5.0
# via -r requirements.in
netaddr==0.10.0
netaddr==0.10.1
# via -r requirements.in
nodeenv==1.7.0
nodeenv==1.8.0
# via pre-commit
oauthlib==3.2.2
# via requests-oauthlib
packaging==21.3
# via
# kubernetes
# requests-oauthlib
packaging==23.2
# via
# ansible-compat
# ansible-core
# molecule
platformdirs==2.5.2
platformdirs==4.1.0
# via virtualenv
pluggy==1.0.0
pluggy==1.3.0
# via molecule
pre-commit==2.21.0
pre-commit==3.6.0
# via -r requirements.in
pre-commit-hooks==4.5.0
# via -r requirements.in
pyasn1==0.4.8
pyasn1==0.5.1
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.2.8
pyasn1-modules==0.3.0
# via google-auth
pycparser==2.21
# via cffi
pygments==2.13.0
pygments==2.17.2
# via rich
pyparsing==3.0.9
# via packaging
pyrsistent==0.19.2
# via jsonschema
python-dateutil==2.8.2
# via
# arrow
# kubernetes
python-slugify==6.1.2
# via cookiecutter
# via kubernetes
python-vagrant==1.0.0
# via molecule-vagrant
# via molecule-plugins
pyyaml==6.0.1
# via
# -r requirements.in
# ansible-compat
# ansible-core
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
requests==2.28.1
referencing==0.32.1
# via
# jsonschema
# jsonschema-specifications
requests==2.31.0
# via
# cookiecutter
# kubernetes
# requests-oauthlib
requests-oauthlib==1.3.1
# via kubernetes
resolvelib==0.8.1
resolvelib==1.0.1
# via ansible-core
rich==12.6.0
rich==13.7.0
# via
# enrich
# molecule
rpds-py==0.17.1
# via
# jsonschema
# referencing
rsa==4.9
# via google-auth
ruamel-yaml==0.17.21
ruamel-yaml==0.18.5
# via pre-commit-hooks
selinux==0.2.1
# via molecule-vagrant
ruamel-yaml-clib==0.2.8
# via ruamel-yaml
six==1.16.0
# via
# google-auth
# kubernetes
# python-dateutil
subprocess-tee==0.4.1
# via ansible-compat
text-unidecode==1.3
# via python-slugify
urllib3==1.26.12
urllib3==2.1.0
# via
# kubernetes
# requests
virtualenv==20.16.6
virtualenv==20.25.0
# via pre-commit
websocket-client==1.4.2
wcmatch==8.5
# via molecule
websocket-client==1.7.0
# via kubernetes
# The following packages are considered to be unsafe in a requirements file:

View File

@@ -1,8 +1,8 @@
---
- name: Create k3s.service.d directory
- name: Create k3s-node.service.d directory
file:
path: '{{ systemd_dir }}/k3s.service.d'
path: '{{ systemd_dir }}/k3s-node.service.d'
state: directory
owner: root
group: root
@@ -12,7 +12,7 @@
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'
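
systemd only applies drop-ins from a directory named after the unit, and the k3s agent runs as k3s-node.service, so the override has to live under k3s-node.service.d rather than k3s.service.d. An illustrative check (not part of the role) that the drop-in is picked up:

# Illustrative only; assumes the agent unit is installed as k3s-node.service.
- name: Show effective drop-ins for the k3s agent unit
  ansible.builtin.command:
    cmd: systemctl cat k3s-node.service
  changed_when: false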

View File

@@ -1,6 +1,6 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s
metal_lb_available_timeout: 240s
# Name of the master group
group_name_master: master

View File

@@ -1,111 +0,0 @@
---
- block:
- name: Create manifests directory on first master
file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
- name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
dest: "/tmp/k3s/tigera-operator.yaml"
owner: root
group: root
mode: 0755
- name: Copy Calico custom resources manifest to first master
ansible.builtin.template:
src: "calico.crs.j2"
dest: /tmp/k3s/custom-resources.yaml
- name: Deploy or replace Tigera Operator
block:
- name: Deploy Tigera Operator
ansible.builtin.command:
cmd: kubectl create -f /tmp/k3s/tigera-operator.yaml
register: create_operator
changed_when: "'created' in create_operator.stdout"
failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
rescue:
- name: Replace existing Tigera Operator
ansible.builtin.command:
cmd: kubectl replace -f /tmp/k3s/tigera-operator.yaml
register: replace_operator
changed_when: "'replaced' in replace_operator.stdout"
failed_when: "'Error' in replace_operator.stderr"
- name: Wait for Tigera Operator resources
command: >-
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator'
--for=condition=Available=True
--timeout=7s
register: tigera_result
changed_when: false
until: tigera_result is succeeded
retries: 7
delay: 7
with_items:
- { name: tigera-operator, type: deployment }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Deploy Calico custom resources
block:
- name: Deploy custom resources for Calico
ansible.builtin.command:
cmd: kubectl create -f /tmp/k3s/custom-resources.yaml
register: create_cr
changed_when: "'created' in create_cr.stdout"
failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
rescue:
- name: Apply new Calico custom resource manifest
ansible.builtin.command:
cmd: kubectl apply -f /tmp/k3s/custom-resources.yaml
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'Error' in apply_cr.stderr"
- name: Wait for Calico system resources to be available
command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace='{{ item.namespace }}'
--selector={{ item.selector }}
--for=condition=Ready
{% else %}
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='{{ item.namespace }}'
--for=condition=Available
{% endif %}
--timeout=7s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- { name: calico-typha, type: deployment, namespace: calico-system }
- { name: calico-kube-controllers, type: deployment, namespace: calico-system }
- { name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system }
- { name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system }
- { name: calico-apiserver, type: deployment, selector: 'k8s-app=calico-apiserver', namespace: calico-apiserver }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Patch Felix configuration for eBPF mode
ansible.builtin.command:
cmd: >
kubectl patch felixconfiguration default
--type='merge'
--patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
register: patch_result
changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
failed_when: "'Error' in patch_result.stderr"
when: calico_ebpf == true
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true # stops "skipped" log spam

View File

@@ -1,9 +1,4 @@
---
- name: Deploy calico
include_tasks: calico.yml
tags: calico
when: use_calico == true
- name: Deploy metallb pool
include_tasks: metallb.yml
tags: metallb

View File

@@ -1,29 +0,0 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
# Configures Calico networking.
calicoNetwork:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: {{ calico_blockSize if calico_blockSize is defined else '26' }}
cidr: {{ calico_cidr if calico_cidr is defined else '10.52.0.0/16' }}
encapsulation: {{ calico_encapsulation if calico_encapsulation is defined else 'VXLANCrossSubnet' }}
natOutgoing: {{ calico_natOutgoing if calico_natOutgoing is defined else 'Enabled' }}
nodeSelector: {{ calico_nodeSelector if calico_nodeSelector is defined else 'all()' }}
nodeAddressAutodetectionV4:
interface: {{ container_iface if container_iface is defined else 'eth0' }}
linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}

View File

@@ -17,21 +17,19 @@
when:
grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
- name: Set detected_distribution to Raspbian
set_fact:
detected_distribution: Raspbian
when: >
raspberry_pi|default(false) and
( ansible_facts.lsb.id|default("") == "Raspbian" or
ansible_facts.lsb.description|default("") is match("[Rr]aspbian.*") )
- name: Set detected_distribution to Raspbian (ARM64 on Debian Buster)
- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm)
set_fact:
detected_distribution: Raspbian
vars:
allowed_descriptions:
- "[Rr]aspbian.*"
- "Debian.*buster"
- "Debian.*bullseye"
- "Debian.*bookworm"
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*buster")
- ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))
- name: Set detected_distribution_major_version
set_fact:
@@ -39,14 +37,6 @@
when:
- detected_distribution | default("") == "Raspbian"
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bullseye)
set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
with_first_found:
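
The join('|') filter collapses the allowed_descriptions list into a single alternation pattern, so one task now covers Raspbian plus the Debian Buster, Bullseye, and Bookworm descriptions. A sketch of the effective condition with the pattern written out literally (the description string is an assumed example):

# Effective pattern after join('|'):
#   [Rr]aspbian.*|Debian.*buster|Debian.*bullseye|Debian.*bookworm
# e.g. an lsb description of "Debian GNU/Linux 12 (bookworm)" matches.
when:
  - ansible_facts.architecture is search("aarch64")
  - raspberry_pi | default(false)
  - ansible_facts.lsb.description | default("") is match("[Rr]aspbian.*|Debian.*buster|Debian.*bullseye|Debian.*bookworm")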

View File

@@ -51,7 +51,9 @@
name: "{{ item }}"
state: absent
with_items:
- "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s.service.d"
- "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s-node.service.d"
when: proxy_env is defined