Compare commits

8 Commits

Author SHA1 Message Date
sholdee      5d4e24283e  Merge c86cbb9fbc into edf0c9eebd                2024-01-19 16:45:26 +00:00
sholdee      c86cbb9fbc  Merge branch 'techno-tim:master' into calico    2024-01-19 10:45:23 -06:00
Ethan Shold  f3b88a7ea4  Add eBPF dataplane option                       2024-01-19 10:01:30 -06:00
sholdee      18044d23a4  Merge branch 'techno-tim:master' into calico    2024-01-18 21:56:07 -06:00
Ethan Shold  2cd03f38f2  Add calico-apiserver check                      2024-01-17 10:14:04 -06:00
sholdee      8e1265fbae  Merge branch 'techno-tim:master' into calico    2024-01-17 09:46:24 -06:00
sholdee      f6ee0c72ef  Merge branch 'techno-tim:master' into calico    2024-01-14 01:40:08 -06:00
Ethan Shold  e7ba494a00  Add Tigera Operator/Calico CNI option           2024-01-14 01:31:42 -06:00
                         Small tweak to reduce delta from head
                         Set calico option to be disabled by default
                         Add rescue blocks in case updating existing
                         Refactor items and update comments
                         Refactor and consolidate calico.yml into block
                         Refactor to use template for Calico CRs
                         Revert use_calico to false
                         Template blockSize
                         Align default cidr in template with all.yml sample
                         Apply upstream version tags
                         Revert to current ver tags. Upstream's don't work.
                         Update template address detection
                         Add Tigera Operator/Calico CNI option
42 changed files with 233 additions and 934 deletions

View File

@@ -18,4 +18,3 @@ exclude_paths:
skip_list:
- 'fqcn-builtins'
- var-naming[no-role-prefix]

View File

@@ -35,12 +35,7 @@ k3s_version: ""
ansible_user: NA
systemd_dir: ""
flannel_iface: ""
#calico_iface: ""
calico_ebpf: ""
calico_cidr: ""
calico_tag: ""
container_iface: ""
apiserver_endpoint: ""
@@ -51,9 +46,6 @@ extra_agent_args: ""
kube_vip_tag_version: ""
kube_vip_cloud_provider_tag_version: ""
kube_vip_lb_ip_range: ""
metal_lb_speaker_tag_version: ""
metal_lb_controller_tag_version: ""

View File

@@ -9,18 +9,3 @@ updates:
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]

View File

@@ -1,42 +0,0 @@
---
name: "Cache"
on:
workflow_call:
jobs:
molecule:
name: cache
runs-on: self-hosted
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Cache Vagrant boxes
id: cache-vagrant
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
lookup-only: true #if it exists, we don't need to restore and can skip the next step
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
if: steps.cache-vagrant.outputs.cache-hit != 'true' # only run if false since this is just a cache step
run: |
./.github/download-boxes.sh
vagrant box list

View File

@@ -2,26 +2,14 @@
name: "CI"
on:
pull_request:
types:
- opened
- synchronize
push:
branches:
- master
paths-ignore:
- '**/.gitignore'
- '**/FUNDING.yml'
- '**/host.ini'
- '**/*.md'
- '**/.editorconfig'
- '**/ansible.example.cfg'
- '**/deploy.sh'
- '**/LICENSE'
- '**/reboot.sh'
- '**/reset.sh'
- '**/README.md'
jobs:
pre:
uses: ./.github/workflows/cache.yml
lint:
uses: ./.github/workflows/lint.yml
needs: [pre]
test:
uses: ./.github/workflows/test.yml
needs: [pre, lint]
needs: [lint]

View File

@@ -5,27 +5,37 @@ on:
jobs:
pre-commit-ci:
name: Pre-Commit
runs-on: self-hosted
runs-on: ubuntu-latest
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Restore Ansible cache
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Ansible
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.ansible/collections
key: ansible-${{ hashFiles('collections/requirements.yml') }}
key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
restore-keys: |
${{ runner.os }}-ansible-
- name: Install dependencies
run: |
@@ -37,17 +47,21 @@ jobs:
python3 -m pip install -r requirements.txt
echo "::endgroup::"
echo "::group::Install Ansible role requirements from collections/requirements.yml"
ansible-galaxy install -r collections/requirements.yml
echo "::endgroup::"
- name: Run pre-commit
uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
ensure-pinned-actions:
name: Ensure SHA Pinned Actions
runs-on: self-hosted
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
with:
allowlist: |
aws-actions/

View File

@@ -5,51 +5,23 @@ on:
jobs:
molecule:
name: Molecule
runs-on: self-hosted
runs-on: macos-12
strategy:
matrix:
scenario:
- default
- ipv6
- single_node
- calico
- cilium
- kube-vip
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
with:
ref: ${{ github.event.pull_request.head.sha }}
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf || true
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
@@ -58,19 +30,35 @@ jobs:
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Vagrant boxes
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Restore vagrant Boxes cache
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
path: ~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
fail-on-cache-miss: true
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
@@ -87,40 +75,18 @@ jobs:
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 120
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf || true
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # 4.3.0
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
with:
name: logs
path: |
${{ runner.temp }}/logs
overwrite: true
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

.gitignore (1 change)
View File

@@ -1,4 +1,3 @@
.env/
*.log
ansible.cfg
kubeconfig

View File

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.33.0
rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: v6.22.2
rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.6
rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
hooks:
- id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.4
rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: 0.6.4
rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks:
- id: fix-smartquotes

View File

@@ -1,5 +1,5 @@
---
k3s_version: v1.29.0+k3s1
k3s_version: v1.25.16+k3s4
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -7,32 +7,15 @@ systemd_dir: /etc/systemd/system
# Set your timezone
system_timezone: "Your/Timezone"
# interface which will be used for flannel
flannel_iface: "eth0"
# node interface which will be used for the container network interface (flannel or calico)
container_iface: "eth0"
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0"
calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: "v3.27.0" # calico version tag
# uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0"
cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.14.6" # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui
# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: "10.52.0.0/16"
# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false
# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
cilium_bgp_my_asn: "64513"
cilium_bgp_peer_asn: "64512"
cilium_bgp_peer_address: "192.168.30.1"
cilium_bgp_lb_cidr: "192.168.31.0/24" # cidr for cilium loadbalancer ipam
# set use_calico to true to use tigera operator/calico instead of the default CNI flannel
# install reference: https://docs.tigera.io/calico/latest/getting-started/kubernetes/k3s/multi-node-install#install-calico
use_calico: false
calico_ebpf: false # use eBPF dataplane instead of iptables https://docs.tigera.io/calico/latest/operations/ebpf
calico_cidr: "10.52.0.0/16" # pod cidr pool
calico_tag: "v3.27.0" # calico version tag
# apiserver_endpoint is virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222"
@@ -44,25 +27,25 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override
# it for each of your hosts, though.
k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}"
k3s_node_ip: '{{ ansible_facts[container_iface]["ipv4"]["address"] }}'
# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
# these arguments are recommended for servers as well as agents:
extra_args: >-
{{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
{{ '--flannel-iface=' + container_iface if not use_calico else '' }}
--node-ip={{ k3s_node_ip }}
# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
# the contents of the if block is also required if using calico or cilium
# the contents of the if block is also required if using calico
extra_server_args: >-
{{ extra_args }}
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
{% if calico_iface is defined or cilium_iface is defined %}
{% if use_calico %}
--flannel-backend=none
--disable-network-policy
--cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
--cluster-cidr={{ calico_cidr }}
{% endif %}
--tls-san {{ apiserver_endpoint }}
--disable servicelb
@@ -72,14 +55,7 @@ extra_agent_args: >-
{{ extra_args }}
# image tag for kube-vip
kube_vip_tag_version: "v0.6.4"
# tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main"
# kube-vip ip range for load balancer
# (uncomment to use kube-vip for services instead of MetalLB)
# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
kube_vip_tag_version: "v0.5.12"
# metallb type frr or native
metal_lb_type: "native"
@@ -93,8 +69,8 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb
metal_lb_speaker_tag_version: "v0.13.12"
metal_lb_controller_tag_version: "v0.13.12"
metal_lb_speaker_tag_version: "v0.13.9"
metal_lb_controller_tag_version: "v0.13.9"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
@@ -104,9 +80,9 @@ metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
# containers are significantly more resource efficient compared to full VMs.
# containers are significantly more resource efficent compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# I would only really recommend using this if you have partiularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false
# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,

View File

@@ -13,12 +13,6 @@ We have these scenarios:
To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
- **single_node**:
Very similar to the default scenario, but uses only a single node for all cluster functionality.
- **calico**:
The same as single node, but uses calico cni instead of flannel.
- **cilium**:
The same as single node, but uses cilium cni instead of flannel.
- **kube-vip**
The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB
## How to execute

View File

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.62
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -1,16 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
calico_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.224"
metal_lb_ip_range: "192.168.30.100-192.168.30.109"

View File

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.63
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -1,16 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
cilium_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
metal_lb_ip_range: "192.168.30.110-192.168.30.119"

View File

@@ -7,7 +7,7 @@ platforms:
- name: control1
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -22,8 +22,8 @@ platforms:
ssh.password: "vagrant"
- name: control2
box: generic/debian12
memory: 1024
box: generic/debian11
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -34,7 +34,7 @@ platforms:
- name: control3
box: generic/rocky9
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -45,7 +45,7 @@ platforms:
- name: node1
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -61,7 +61,7 @@ platforms:
- name: node2
box: generic/rocky9
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -72,8 +72,6 @@ platforms:
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -84,6 +82,7 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -6,7 +6,7 @@
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
container_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45

View File

@@ -6,7 +6,7 @@ driver:
platforms:
- name: control1
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -22,7 +22,7 @@ platforms:
- name: control2
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -38,7 +38,7 @@ platforms:
- name: node1
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -53,8 +53,6 @@ platforms:
ssh.password: "vagrant"
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -65,6 +63,7 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -6,7 +6,7 @@
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
container_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be
# broadcasted on. Since we have assigned a dedicated private network
@@ -27,13 +27,13 @@
- fdad:bad:ba55::1b:0/112
- 192.168.123.80-192.168.123.90
# k3s_node_ip is by default set to the IPv4 address of flannel_iface.
# k3s_node_ip is by default set to the IPv4 address of container_iface.
# We want IPv6 addresses here of course, so we just specify them
# manually below.
k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}"
- name: Override host variables (2/2)
# Since "extra_args" depends on "k3s_node_ip" and "flannel_iface" we have
# Since "extra_args" depends on "k3s_node_ip" and "container_iface" we have
# to set this AFTER overriding the both of them.
ansible.builtin.set_fact:
# A few extra server args are necessary:

View File

@@ -30,7 +30,7 @@
name: net.ipv6.conf.{{ item }}.accept_dad
value: "0"
with_items:
- "{{ flannel_iface }}"
- "{{ container_iface }}"
- name: Write IPv4 configuration
ansible.builtin.template:

View File

@@ -3,6 +3,6 @@ network:
version: 2
renderer: networkd
ethernets:
{{ flannel_iface }}:
{{ container_iface }}:
addresses:
- {{ node_ipv4 }}/24

View File

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.62
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -1,17 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
# Use kube-vip instead of MetalLB
kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"

View File

@@ -21,8 +21,6 @@ platforms:
ip: 192.168.30.50
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -33,6 +31,7 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -6,7 +6,7 @@
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
container_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45

View File

@@ -1,10 +1,10 @@
ansible-core>=2.16.2
ansible-core>=2.13.5
jmespath>=1.0.1
jsonpatch>=1.33
kubernetes>=29.0.0
molecule-plugins[vagrant]
molecule>=6.0.3
netaddr>=0.10.1
pre-commit>=3.6.0
pre-commit-hooks>=4.5.0
pyyaml>=6.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
molecule-vagrant>=1.0.0
molecule>=4.0.3
netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0

View File

@@ -4,165 +4,174 @@
#
# pip-compile requirements.in
#
ansible-compat==4.1.11
ansible-compat==3.0.1
# via molecule
ansible-core==2.16.3
ansible-core==2.15.4
# via
# -r requirements.in
# ansible-compat
# molecule
attrs==23.2.0
# via
# jsonschema
# referencing
bracex==2.4
# via wcmatch
cachetools==5.3.2
arrow==1.2.3
# via jinja2-time
attrs==22.1.0
# via jsonschema
binaryornot==0.4.4
# via cookiecutter
cachetools==5.2.0
# via google-auth
certifi==2023.11.17
certifi==2022.9.24
# via
# kubernetes
# requests
cffi==1.16.0
cffi==1.15.1
# via cryptography
cfgv==3.4.0
cfgv==3.3.1
# via pre-commit
charset-normalizer==3.3.2
chardet==5.0.0
# via binaryornot
charset-normalizer==2.1.1
# via requests
click==8.1.7
click==8.1.3
# via
# click-help-colors
# cookiecutter
# molecule
click-help-colors==0.9.4
click-help-colors==0.9.1
# via molecule
cryptography==41.0.7
commonmark==0.9.1
# via rich
cookiecutter==2.1.1
# via molecule
cryptography==38.0.3
# via ansible-core
distlib==0.3.8
distlib==0.3.6
# via virtualenv
distro==1.8.0
# via selinux
enrich==1.2.7
# via molecule
filelock==3.13.1
filelock==3.8.0
# via virtualenv
google-auth==2.26.2
google-auth==2.14.0
# via kubernetes
identify==2.5.33
identify==2.5.8
# via pre-commit
idna==3.6
idna==3.4
# via requests
jinja2==3.1.3
jinja2==3.1.2
# via
# ansible-core
# cookiecutter
# jinja2-time
# molecule
# molecule-vagrant
jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1
# via -r requirements.in
jsonpatch==1.33
# via -r requirements.in
jsonpointer==2.4
jsonpointer==2.3
# via jsonpatch
jsonschema==4.21.1
jsonschema==4.17.0
# via
# ansible-compat
# molecule
jsonschema-specifications==2023.12.1
# via jsonschema
kubernetes==29.0.0
kubernetes==25.3.0
# via -r requirements.in
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.4
markupsafe==2.1.1
# via jinja2
mdurl==0.1.2
# via markdown-it-py
molecule==6.0.3
molecule==4.0.4
# via
# -r requirements.in
# molecule-plugins
molecule-plugins[vagrant]==23.5.0
# molecule-vagrant
molecule-vagrant==1.0.0
# via -r requirements.in
netaddr==0.10.1
netaddr==0.10.0
# via -r requirements.in
nodeenv==1.8.0
nodeenv==1.7.0
# via pre-commit
oauthlib==3.2.2
# via
# kubernetes
# requests-oauthlib
packaging==23.2
# via requests-oauthlib
packaging==21.3
# via
# ansible-compat
# ansible-core
# molecule
platformdirs==4.1.0
platformdirs==2.5.2
# via virtualenv
pluggy==1.3.0
pluggy==1.0.0
# via molecule
pre-commit==3.6.0
pre-commit==2.21.0
# via -r requirements.in
pre-commit-hooks==4.5.0
# via -r requirements.in
pyasn1==0.5.1
pyasn1==0.4.8
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.3.0
pyasn1-modules==0.2.8
# via google-auth
pycparser==2.21
# via cffi
pygments==2.17.2
pygments==2.13.0
# via rich
pyparsing==3.0.9
# via packaging
pyrsistent==0.19.2
# via jsonschema
python-dateutil==2.8.2
# via kubernetes
# via
# arrow
# kubernetes
python-slugify==6.1.2
# via cookiecutter
python-vagrant==1.0.0
# via molecule-plugins
# via molecule-vagrant
pyyaml==6.0.1
# via
# -r requirements.in
# ansible-compat
# ansible-core
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
referencing==0.32.1
# via
# jsonschema
# jsonschema-specifications
requests==2.31.0
requests==2.28.1
# via
# cookiecutter
# kubernetes
# requests-oauthlib
requests-oauthlib==1.3.1
# via kubernetes
resolvelib==1.0.1
resolvelib==0.8.1
# via ansible-core
rich==13.7.0
rich==12.6.0
# via
# enrich
# molecule
rpds-py==0.17.1
# via
# jsonschema
# referencing
rsa==4.9
# via google-auth
ruamel-yaml==0.18.5
ruamel-yaml==0.17.21
# via pre-commit-hooks
ruamel-yaml-clib==0.2.8
# via ruamel-yaml
selinux==0.2.1
# via molecule-vagrant
six==1.16.0
# via
# google-auth
# kubernetes
# python-dateutil
subprocess-tee==0.4.1
# via ansible-compat
urllib3==2.1.0
text-unidecode==1.3
# via python-slugify
urllib3==1.26.12
# via
# kubernetes
# requests
virtualenv==20.25.0
virtualenv==20.16.6
# via pre-commit
wcmatch==8.5
# via molecule
websocket-client==1.7.0
websocket-client==1.4.2
# via kubernetes
# The following packages are considered to be unsafe in a requirements file:

View File

@@ -1,8 +1,8 @@
---
- name: Create k3s-node.service.d directory
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s-node.service.d'
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
@@ -12,7 +12,7 @@
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'

View File

@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip cloud provider manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy kubevip configMap manifest to first master
template:
src: "kubevip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

View File

@@ -29,12 +29,6 @@
- name: Deploy metallb manifest
include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Deploy kube-vip manifest
include_tasks: kube-vip.yml
tags: kubevip
when: kube_vip_lb_ip_range is defined
- name: Init cluster inside the transient k3s-init service
command:

View File

@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubevip
namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
range-global: {{ kube_vip_lb_ip_range | join(',') }}
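
For reference, rendering the removed kubevip ConfigMap template above with a single-string kube_vip_lb_ip_range (for example the range suggested in the sample all.yml) would produce roughly this manifest; the value shown is illustrative:

apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
  range-global: 192.168.30.80-192.168.30.90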

View File

@@ -43,7 +43,7 @@ spec:
- name: vip_ddns
value: "false"
- name: svc_enable
value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}"
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration

View File

@@ -1,6 +1,6 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 240s
metal_lb_available_timeout: 120s
# Name of the master group
group_name_master: master

View File

@@ -1,8 +1,5 @@
---
- name: Deploy Calico to cluster
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- block:
- name: Create manifests directory on first master
file:
path: /tmp/k3s
@@ -23,9 +20,6 @@
ansible.builtin.template:
src: "calico.crs.j2"
dest: /tmp/k3s/custom-resources.yaml
owner: root
group: root
mode: 0755
- name: Deploy or replace Tigera Operator
block:
@@ -55,10 +49,10 @@
retries: 7
delay: 7
with_items:
- {name: tigera-operator, type: deployment}
- { name: tigera-operator, type: deployment }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Deploy Calico custom resources
block:
- name: Deploy custom resources for Calico
@@ -74,7 +68,7 @@
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'Error' in apply_cr.stderr"
- name: Wait for Calico system resources to be available
command: >-
{% if item.type == 'daemonset' %}
@@ -94,11 +88,11 @@
retries: 30
delay: 7
with_items:
- {name: calico-typha, type: deployment, namespace: calico-system}
- {name: calico-kube-controllers, type: deployment, namespace: calico-system}
- {name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system}
- {name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system}
- {name: calico-apiserver, type: deployment, namespace: calico-apiserver}
- { name: calico-typha, type: deployment, namespace: calico-system }
- { name: calico-kube-controllers, type: deployment, namespace: calico-system }
- { name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system }
- { name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system }
- { name: calico-apiserver, type: deployment, selector: 'k8s-app=calico-apiserver', namespace: calico-apiserver }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
@@ -106,9 +100,12 @@
ansible.builtin.command:
cmd: >
kubectl patch felixconfiguration default
--type='merge'
--type='merge'
--patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
register: patch_result
changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
failed_when: "'Error' in patch_result.stderr"
when: calico_ebpf
when: calico_ebpf == true
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true # stops "skipped" log spam
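
Reassembled from the fragments in the hunk above, the eBPF-only patch step reads roughly as the following self-contained task (the task name is assumed; the remaining fields follow the diff):

- name: Patch felixconfiguration for the eBPF dataplane   # task name is an assumption
  ansible.builtin.command:
    cmd: >
      kubectl patch felixconfiguration default
      --type='merge'
      --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
  register: patch_result
  changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
  failed_when: "'Error' in patch_result.stderr"
  when: calico_ebpf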

View File

@@ -1,253 +0,0 @@
---
- name: Prepare Cilium CLI on first master and deploy CNI
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create tmp directory on first master
file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
- name: Check if Cilium CLI is installed
ansible.builtin.command: cilium version
register: cilium_cli_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check for Cilium CLI version in command output
set_fact:
installed_cli_version: >-
{{
cilium_cli_installed.stdout_lines
| join(' ')
| regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
when: cilium_cli_installed.rc == 0
- name: Get latest stable Cilium CLI version file
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt"
dest: "/tmp/k3s/cilium-cli-stable.txt"
owner: root
group: root
mode: 0755
- name: Read Cilium CLI stable version from file
ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
register: cli_ver
changed_when: false
- name: Log installed Cilium CLI version
ansible.builtin.debug:
msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"
- name: Log latest stable Cilium CLI version
ansible.builtin.debug:
msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"
- name: Determine if Cilium CLI needs installation or update
set_fact:
cilium_cli_needs_update: >-
{{
cilium_cli_installed.rc != 0 or
(cilium_cli_installed.rc == 0 and
installed_cli_version != cli_ver.stdout)
}}
- name: Install or update Cilium CLI
when: cilium_cli_needs_update
block:
- name: Set architecture variable
ansible.builtin.set_fact:
cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
- name: Download Cilium CLI and checksum
ansible.builtin.get_url:
url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
dest: "/tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}"
owner: root
group: root
mode: 0755
loop:
- ".tar.gz"
- ".tar.gz.sha256sum"
vars:
cilium_base_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}"
- name: Verify the downloaded tarball
ansible.builtin.shell: |
cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
args:
executable: /bin/bash
changed_when: false
- name: Extract Cilium CLI to /usr/local/bin
ansible.builtin.unarchive:
src: "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
dest: /usr/local/bin
remote_src: true
- name: Remove downloaded tarball and checksum file
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum"
- name: Wait for connectivity to kube VIP
ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
register: ping_result
until: ping_result.rc == 0
retries: 21
delay: 1
ignore_errors: true
changed_when: false
- name: Fail if kube VIP not reachable
ansible.builtin.fail:
msg: "API endpoint {{ apiserver_endpoint }} is not reachable"
when: ping_result.rc != 0
- name: Test for existing Cilium install
ansible.builtin.command: k3s kubectl -n kube-system get daemonsets cilium
register: cilium_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check existing Cilium install
when: cilium_installed.rc == 0
block:
- name: Check Cilium version
ansible.builtin.command: cilium version
register: cilium_version
failed_when: false
changed_when: false
ignore_errors: true
- name: Parse installed Cilium version
set_fact:
installed_cilium_version: >-
{{
cilium_version.stdout_lines
| join(' ')
| regex_findall('cilium image.+(\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
- name: Determine if Cilium needs update
set_fact:
cilium_needs_update: >-
{{ 'v' + installed_cilium_version != cilium_tag }}
- name: Log result
ansible.builtin.debug:
msg: >
Installed Cilium version: {{ installed_cilium_version }},
Target Cilium version: {{ cilium_tag }},
Update needed: {{ cilium_needs_update }}
- name: Install Cilium
ansible.builtin.command: >-
{% if cilium_installed.rc != 0 %}
cilium install
{% else %}
cilium upgrade
{% endif %}
--version "{{ cilium_tag }}"
--helm-set operator.replicas="1"
{{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
--helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
{% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
--helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
{% endif %}
--helm-set k8sServiceHost={{ apiserver_endpoint }}
--helm-set k8sServicePort="6443"
--helm-set routingMode={{ cilium_mode | default("native") }}
--helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
--helm-set kubeProxyReplacement={{ kube_proxy_replacement | default("true") }}
--helm-set bpf.masquerade={{ enable_bpf_masquerade | default("true") }}
--helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
--helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
{% if kube_proxy_replacement is not false %}
--helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm | default("maglev") }}
--helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }}
{% endif %}
environment:
KUBECONFIG: /home/{{ ansible_user }}/.kube/config
register: cilium_install_result
changed_when: cilium_install_result.rc == 0
when: cilium_installed.rc != 0 or cilium_needs_update
- name: Wait for Cilium resources
command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace=kube-system
--selector='k8s-app=cilium'
--for=condition=Ready
{% else %}
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace=kube-system
--for=condition=Available
{% endif %}
--timeout=7s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- {name: cilium-operator, type: deployment}
- {name: cilium, type: daemonset, selector: 'k8s-app=cilium'}
- {name: hubble-relay, type: deployment, check_hubble: true}
- {name: hubble-ui, type: deployment, check_hubble: true}
loop_control:
label: "{{ item.type }}/{{ item.name }}"
when: >-
not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)
- name: Configure Cilium BGP
when: cilium_bgp
block:
- name: Copy BGP manifests to first master
ansible.builtin.template:
src: "cilium.crs.j2"
dest: /tmp/k3s/cilium-bgp.yaml
owner: root
group: root
mode: 0755
- name: Apply BGP manifests
ansible.builtin.command:
cmd: kubectl apply -f /tmp/k3s/cilium-bgp.yaml
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'is invalid' in apply_cr.stderr"
ignore_errors: true
- name: Print error message if BGP manifests application fails
ansible.builtin.debug:
msg: "{{ apply_cr.stderr }}"
when: "'is invalid' in apply_cr.stderr"
- name: Test for BGP config resources
ansible.builtin.command: "{{ item }}"
loop:
- k3s kubectl get CiliumBGPPeeringPolicy.cilium.io
- k3s kubectl get CiliumLoadBalancerIPPool.cilium.io
changed_when: false
loop_control:
label: "{{ item }}"

View File

@@ -2,17 +2,11 @@
- name: Deploy calico
include_tasks: calico.yml
tags: calico
when: calico_iface is defined and cilium_iface is not defined
- name: Deploy cilium
include_tasks: cilium.yml
tags: cilium
when: cilium_iface is defined
when: use_calico == true
- name: Deploy metallb pool
include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Remove tmp directory used for manifests
file:

View File

@@ -8,27 +8,6 @@
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Delete outdated metallb replicas
shell: |-
set -o pipefail
REPLICAS=$(k3s kubectl --namespace='metallb-system' get replicasets \
-l 'component=controller,app=metallb' \
-o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
if [ -n "${REPLICAS_SETS}" ] ; then
for REPLICAS in "${REPLICAS_SETS}"
do
k3s kubectl --namespace='metallb-system' \
delete rs "${REPLICAS}"
done
fi
args:
executable: /bin/bash
changed_when: false
run_once: true
with_items: "{{ groups[group_name_master | default('master')] }}"
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"

View File

@@ -9,13 +9,13 @@ spec:
calicoNetwork:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: {{ calico_blockSize | default('26') }}
cidr: {{ cluster_cidr | default('10.52.0.0/16') }}
encapsulation: {{ calico_encapsulation | default('VXLANCrossSubnet') }}
natOutgoing: {{ calico_natOutgoing | default('Enabled') }}
nodeSelector: {{ calico_nodeSelector | default('all()') }}
- blockSize: {{ calico_blockSize if calico_blockSize is defined else '26' }}
cidr: {{ calico_cidr if calico_cidr is defined else '10.52.0.0/16' }}
encapsulation: {{ calico_encapsulation if calico_encapsulation is defined else 'VXLANCrossSubnet' }}
natOutgoing: {{ calico_natOutgoing if calico_natOutgoing is defined else 'Enabled' }}
nodeSelector: {{ calico_nodeSelector if calico_nodeSelector is defined else 'all()' }}
nodeAddressAutodetectionV4:
interface: {{ calico_iface }}
interface: {{ container_iface if container_iface is defined else 'eth0' }}
linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}
---
@@ -27,15 +27,3 @@ kind: APIServer
metadata:
name: default
spec: {}
{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kubernetes-services-endpoint
namespace: tigera-operator
data:
KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
KUBERNETES_SERVICE_PORT: '6443'
{% endif %}
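
With the template defaults shown above and container_iface left at eth0, the rendered custom-resources.yaml would look roughly like the sketch below. The Installation header (apiVersion/kind) is not part of the hunk and is assumed from the standard Tigera Operator resources:

apiVersion: operator.tigera.io/v1
kind: Installation            # assumed header; only the spec fields appear in the hunk above
metadata:
  name: default
spec:
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: 10.52.0.0/16
        encapsulation: VXLANCrossSubnet
        natOutgoing: Enabled
        nodeSelector: all()
    nodeAddressAutodetectionV4:
      interface: eth0
    linuxDataplane: Iptables
---
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}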

View File

@@ -1,29 +0,0 @@
apiVersion: "cilium.io/v2alpha1"
kind: CiliumBGPPeeringPolicy
metadata:
name: 01-bgp-peering-policy
spec: # CiliumBGPPeeringPolicySpec
virtualRouters: # []CiliumBGPVirtualRouter
- localASN: {{ cilium_bgp_my_asn }}
exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
neighbors: # []CiliumBGPNeighbor
- peerAddress: '{{ cilium_bgp_peer_address + "/32"}}'
peerASN: {{ cilium_bgp_peer_asn }}
eBGPMultihopTTL: 10
connectRetryTimeSeconds: 120
holdTimeSeconds: 90
keepAliveTimeSeconds: 30
gracefulRestart:
enabled: true
restartTimeSeconds: 120
serviceSelector:
matchExpressions:
- {key: somekey, operator: NotIn, values: ['never-used-value']}
---
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
name: "01-lb-pool"
spec:
cidrs:
- cidr: "{{ cilium_bgp_lb_cidr }}"

View File

@@ -17,27 +17,21 @@
when:
grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm)
- name: Set detected_distribution to Raspbian
set_fact:
detected_distribution: Raspbian
vars:
allowed_descriptions:
- "[Rr]aspbian.*"
- "Debian.*buster"
- "Debian.*bullseye"
- "Debian.*bookworm"
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))
when: >
raspberry_pi|default(false) and
( ansible_facts.lsb.id|default("") == "Raspbian" or
ansible_facts.lsb.description|default("") is match("[Rr]aspbian.*") )
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm)
- name: Set detected_distribution to Raspbian (ARM64 on Debian Buster)
set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bookworm")
- ansible_facts.lsb.description|default("") is match("Debian.*buster")
- name: Set detected_distribution_major_version
set_fact:
@@ -45,6 +39,14 @@
when:
- detected_distribution | default("") == "Raspbian"
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bullseye)
set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
with_first_found:

View File

@@ -45,16 +45,13 @@
- /var/lib/rancher/k3s
- /var/lib/rancher/
- /var/lib/cni/
- /etc/cni/net.d
- name: Remove K3s http_proxy files
file:
name: "{{ item }}"
state: absent
with_items:
- "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s.service.d"
- "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s-node.service.d"
when: proxy_env is defined

View File

@@ -46,14 +46,3 @@
roles:
- role: k3s_server_post
become: true
- name: Storing kubeconfig in the playbook directory
hosts: master
environment: "{{ proxy_env | default({}) }}"
tasks:
- name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
ansible.builtin.fetch:
src: "{{ ansible_user_dir }}/.kube/config"
dest: ./kubeconfig
flat: true
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']