Compare commits


1 commit

Author: tim
SHA1: f1277d4e8d
Message: Merge pull request 'master' (#1) from tim/k3s-ansible:master into master
Reviewed-on: homelab/k3s-ansible#1
Date: 2024-01-29 22:00:04 +01:00

31 changed files with 62 additions and 533 deletions

View File

@@ -18,4 +18,3 @@ exclude_paths:
skip_list:
- 'fqcn-builtins'
- var-naming[no-role-prefix]

View File

@@ -9,17 +9,12 @@ set -euo pipefail
GIT_ROOT=$(git rev-parse --show-toplevel)
PROVIDER=virtualbox
yq --version
# Define the path to the molecule.yml files
MOLECULE_YML_PATH="${GIT_ROOT}/molecule/*/molecule.yml"
# Extract and sort unique boxes from all molecule.yml files
all_boxes=$(for file in $MOLECULE_YML_PATH; do
yq eval '.platforms[].box' "$file"
done | sort -u)
echo all_boxes: "$all_boxes"
# Read all boxes for all platforms from the "molecule.yml" files
all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
grep --invert-match --regexp=--- | # Filter out file separators
sort |
uniq)
# Read the boxes that are currently present on the system (for the current provider)
present_boxes=$(
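
Both variants in this hunk compute the same set: the unique Vagrant boxes referenced by any molecule scenario. A minimal standalone sketch of the idea, assuming yq v4 and the `molecule/*/molecule.yml` layout used in the script:

```bash
# Collect every box name under .platforms[] across all scenarios,
# then de-duplicate (run from the repository root).
for file in molecule/*/molecule.yml; do
  yq eval '.platforms[].box' "$file"
done | sort -u
```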

View File

@@ -11,19 +11,19 @@ jobs:
steps:
- name: Check out the codebase
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Cache Vagrant boxes
id: cache-vagrant
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
lookup-only: true #if it exists, we don't need to restore and can skip the next step
path: |

View File

@@ -11,18 +11,18 @@ jobs:
steps:
- name: Check out the codebase
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Restore Ansible cache
uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
path: ~/.ansible/collections
key: ansible-${{ hashFiles('collections/requirements.yml') }}
@@ -38,16 +38,16 @@ jobs:
echo "::endgroup::"
- name: Run pre-commit
uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # 3.0.1
uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
ensure-pinned-actions:
name: Ensure SHA Pinned Actions
runs-on: self-hosted
steps:
- name: Checkout code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@b88cd0aad2c36a63e42c71f81cb1958fed95ac87 # 3.0.10
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
with:
allowlist: |
aws-actions/
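
Each `uses:` swap in these workflow hunks replaces one full-length commit SHA with another, keeping the human-readable version in a trailing comment, which is the pattern the "Ensure SHA Pinned Actions" job enforces. A hedged sketch of how such a pin can be looked up (the action and tag are just the ones appearing in the diff):

```bash
# List the remote tags of the action and find the commit behind the
# release; that SHA goes into the `uses:` line, with the tag kept
# as a comment.
git ls-remote --tags https://github.com/actions/checkout | grep 4.1.7
```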

View File

@@ -10,10 +10,9 @@ jobs:
matrix:
scenario:
- default
# - ipv6
- ipv6
- single_node
- calico
- cilium
- kube-vip
fail-fast: false
env:
@@ -21,7 +20,7 @@ jobs:
steps:
- name: Check out the codebase
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
@@ -59,13 +58,13 @@ jobs:
EOF
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Restore vagrant Boxes cache
uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
path: ~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
@@ -118,7 +117,7 @@ jobs:
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # 4.3.4
uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # 4.3.0
with:
name: logs
path: |
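
This workflow fans out one molecule run per matrix entry; the hunk toggles the ipv6 scenario and drops cilium from the list. Assuming the tooling from requirements.txt plus Vagrant and VirtualBox are installed, a single scenario can be exercised locally the same way:

```bash
# Run one molecule scenario end to end (create, converge, verify,
# destroy), mirroring a single matrix job.
molecule test --scenario-name single_node
```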

View File

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.33.0
rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: v6.22.2
rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.6
rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
hooks:
- id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.4
rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: 0.6.4
rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks:
- id: fix-smartquotes
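
The added lines carry `# frozen:` comments, which is the format pre-commit itself emits when asked to pin hook repositories to commit SHAs rather than tags:

```bash
# Rewrite each hook's rev to a commit SHA and record the matching
# tag in a "# frozen: <tag>" comment.
pre-commit autoupdate --freeze
```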

View File

@@ -96,22 +96,8 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:
```bash
scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
scp debian@master_ip:~/.kube/config ~/.kube/config
```
If you get a Permission denied error, go into the node and temporarily run:
```bash
sudo chmod 777 /etc/rancher/k3s/k3s.yaml
```
Then copy with the scp command and reset the permissions back to:
```bash
sudo chmod 600 /etc/rancher/k3s/k3s.yaml
```
You'll then want to modify the config to point to master IP by running:
```bash
sudo nano ~/.kube/config
```
Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
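
A hedged alternative to the chmod workaround described above: stream the file through sudo instead of loosening its permissions, then rewrite the server address in one step (the IPs are the example values from the text):

```bash
# Copy the kubeconfig without touching permissions on the node...
ssh debian@master_ip 'sudo cat /etc/rancher/k3s/k3s.yaml' > ~/.kube/config
# ...then point it at the master instead of the loopback address.
sed -i 's|https://127.0.0.1:6443|https://192.168.1.222:6443|' ~/.kube/config
```
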
### 🔨 Testing your cluster

View File

@@ -1,5 +1,5 @@
---
k3s_version: v1.30.2+k3s2
k3s_version: v1.29.0+k3s1
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -13,26 +13,8 @@ flannel_iface: "eth0"
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0"
calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: "v3.28.0" # calico version tag
# uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0"
cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.16.0" # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui
# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: "10.52.0.0/16"
# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false
# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
cilium_bgp_my_asn: "64513"
cilium_bgp_peer_asn: "64512"
cilium_bgp_peer_address: "192.168.30.1"
cilium_bgp_lb_cidr: "192.168.31.0/24" # cidr for cilium loadbalancer ipam
calico_cidr: "10.52.0.0/16" # calico cluster pod cidr pool
calico_tag: "v3.27.0" # calico version tag
# apiserver_endpoint is virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222"
@@ -44,25 +26,25 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override
# it for each of your hosts, though.
k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}"
k3s_node_ip: "{{ ansible_facts[(calico_iface | default(flannel_iface))]['ipv4']['address'] }}"
# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
# these arguments are recommended for servers as well as agents:
extra_args: >-
{{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
{{ '--flannel-iface=' + flannel_iface if calico_iface is not defined else '' }}
--node-ip={{ k3s_node_ip }}
# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
# the contents of the if block is also required if using calico or cilium
# the contents of the if block is also required if using calico
extra_server_args: >-
{{ extra_args }}
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
{% if calico_iface is defined or cilium_iface is defined %}
{% if calico_iface is defined %}
--flannel-backend=none
--disable-network-policy
--cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
--cluster-cidr={{ calico_cidr | default('10.52.0.0/16') }}
{% endif %}
--tls-san {{ apiserver_endpoint }}
--disable servicelb
@@ -72,7 +54,7 @@ extra_agent_args: >-
{{ extra_args }}
# image tag for kube-vip
kube_vip_tag_version: "v0.8.2"
kube_vip_tag_version: "v0.6.4"
# tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main"
@@ -93,8 +75,8 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb
metal_lb_speaker_tag_version: "v0.14.8"
metal_lb_controller_tag_version: "v0.14.8"
metal_lb_speaker_tag_version: "v0.13.12"
metal_lb_controller_tag_version: "v0.13.12"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
@@ -160,10 +142,6 @@ custom_registries_yaml: |
username: yourusername
password: yourpassword
# On some distros like Diet Pi, there is no dbus installed. dbus required by the default reboot command.
# Uncomment if you need a custom reboot command
# custom_reboot_command: /usr/sbin/shutdown -r now
# Only enable and configure these if you access the internet through a proxy
# proxy_env:
# HTTP_PROXY: "http://proxy.domain.local:3128"
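
For illustration only: with `calico_iface` uncommented and the defaults in this file, the `extra_server_args` template above renders to roughly the following k3s server flags. The node IP is a made-up example value, and the taint flag appears only when agent nodes exist.

```bash
# Approximate result of rendering extra_args + extra_server_args
# with calico enabled; VIP and CIDR come from the sample group_vars.
k3s server \
  --node-ip=192.168.30.38 \
  --flannel-backend=none \
  --disable-network-policy \
  --cluster-cidr=10.52.0.0/16 \
  --tls-san 192.168.30.222 \
  --disable servicelb
```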

View File

@@ -15,8 +15,6 @@ We have these scenarios:
Very similar to the default scenario, but uses only a single node for all cluster functionality.
- **calico**:
The same as single node, but uses calico cni instead of flannel.
- **cilium**:
The same as single node, but uses cilium cni instead of flannel.
- **kube-vip**:
The same as single node, but uses kube-vip as the service load balancer instead of MetalLB.

View File

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.63
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -1,16 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
cilium_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
metal_lb_ip_range: "192.168.30.110-192.168.30.119"

View File

@@ -6,5 +6,4 @@
- name: Reboot the nodes (and wait up to 5 mins max)
become: true
reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 300

View File

@@ -6,7 +6,7 @@
#
ansible-compat==4.1.11
# via molecule
ansible-core==2.17.2
ansible-core==2.16.2
# via
# -r requirements.in
# ansible-compat
@@ -77,7 +77,7 @@ molecule==6.0.3
# via
# -r requirements.in
# molecule-plugins
molecule-plugins[vagrant]==23.5.3
molecule-plugins[vagrant]==23.5.0
# via -r requirements.in
netaddr==0.10.1
# via -r requirements.in
@@ -96,9 +96,9 @@ platformdirs==4.1.0
# via virtualenv
pluggy==1.3.0
# via molecule
pre-commit==3.8.0
pre-commit==3.6.0
# via -r requirements.in
pre-commit-hooks==4.6.0
pre-commit-hooks==4.5.0
# via -r requirements.in
pyasn1==0.5.1
# via
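
The `# via` annotations mark this file as pip-compile output, so the version changes here would normally come from re-resolving `requirements.in` rather than hand-editing the pins. Assuming pip-tools is installed:

```bash
# Re-resolve the pinned set from the top-level requirements.
pip-compile --output-file requirements.txt requirements.in
```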

View File

@@ -12,7 +12,6 @@
- name: Reboot and wait for node to come back up
become: true
reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 3600
- name: Revert changes to Proxmox cluster

View File

@@ -1,4 +1,5 @@
---
- name: Create k3s-node.service.d directory
file:
path: '{{ systemd_dir }}/k3s-node.service.d'
@@ -6,7 +7,7 @@
owner: root
group: root
mode: '0755'
when: proxy_env is defined
- name: Copy K3s http_proxy conf file
template:
@@ -15,4 +16,3 @@
owner: root
group: root
mode: '0755'
when: proxy_env is defined

View File

@@ -1,35 +1,19 @@
---
- name: Check for PXE-booted system
block:
- name: Check if system is PXE-booted
ansible.builtin.command:
cmd: cat /proc/cmdline
register: boot_cmdline
changed_when: false
check_mode: false
- name: Set fact for PXE-booted system
ansible.builtin.set_fact:
is_pxe_booted: "{{ 'root=/dev/nfs' in boot_cmdline.stdout }}"
when: boot_cmdline.stdout is defined
- name: Include http_proxy configuration tasks
ansible.builtin.include_tasks: http_proxy.yml
- name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml
when: proxy_env is defined
- name: Configure the k3s service
ansible.builtin.template:
- name: Copy K3s service file
template:
src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s-node.service"
owner: root
group: root
mode: '0755'
mode: 0755
- name: Manage k3s service
ansible.builtin.systemd:
- name: Enable and check K3s service
systemd:
name: k3s-node
daemon_reload: true
state: restarted
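
Once the role has run, the rendered unit and the agent's behaviour can be checked directly on a node (service name as defined in the tasks above):

```bash
# Show the unit file systemd actually loaded, then tail its log.
systemctl cat k3s-node
journalctl -u k3s-node -e
```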

View File

@@ -7,14 +7,11 @@ After=network-online.target
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
# Conditional snapshotter based on PXE boot status
ExecStart=/usr/local/bin/k3s agent \
--server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 \
{% if is_pxe_booted | default(false) %}--snapshotter native \
{% endif %}--token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} \
{{ extra_agent_args | default("") }}
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
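
For a PXE-booted node (`is_pxe_booted` true), the conditional template above renders roughly the following command line; the VIP is the sample `apiserver_endpoint`, and the shell variables are placeholders standing in for the Jinja expressions:

```bash
# Placeholders, not real values: K3S_TOKEN and EXTRA_AGENT_ARGS stand
# in for the token lookup and extra_agent_args in the template.
/usr/local/bin/k3s agent \
  --server "https://192.168.30.222:6443" \
  --snapshotter native \
  --token "${K3S_TOKEN}" \
  ${EXTRA_AGENT_ARGS}
```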

View File

@@ -4,9 +4,6 @@
# will determine the right interface automatically at runtime.
kube_vip_iface: null
# Enables ARP broadcasts from Leader
kube_vip_arp: true
# Name of the master group
group_name_master: master

View File

@@ -29,7 +29,7 @@
- name: Deploy metallb manifest
include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
when: kube_vip_lb_ip_range is not defined
- name: Deploy kube-vip manifest
include_tasks: kube-vip.yml

View File

@@ -10,7 +10,7 @@
- name: Download vip rbac manifest to first master
ansible.builtin.get_url:
url: "https://kube-vip.io/manifests/rbac.yaml"
url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root
group: root

View File

@@ -27,7 +27,7 @@ spec:
- manager
env:
- name: vip_arp
value: "{{ 'true' if kube_vip_arp | bool else 'false' }}"
value: "true"
- name: port
value: "6443"
{% if kube_vip_iface %}

View File

@@ -48,7 +48,7 @@
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator'
--for=condition=Available=True
--timeout=30s
--timeout=7s
register: tigera_result
changed_when: false
until: tigera_result is succeeded
@@ -87,7 +87,7 @@
--namespace='{{ item.namespace }}'
--for=condition=Available
{% endif %}
--timeout=30s
--timeout=7s
register: cr_result
changed_when: false
until: cr_result is succeeded
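
Both timeout changes apply to `kubectl wait` calls wrapped in an until/retries loop, so each attempt blocks for at most `--timeout` before the task retries. The underlying command can be run by hand against the cluster:

```bash
# Wait (up to 30s per call) for the operator to report Available;
# deployment/tigera-operator stands in for the looped
# {{ item.type }}/{{ item.name }} in the task above.
k3s kubectl wait deployment/tigera-operator \
  --namespace='tigera-operator' \
  --for=condition=Available=True \
  --timeout=30s
```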

View File

@@ -1,253 +0,0 @@
---
- name: Prepare Cilium CLI on first master and deploy CNI
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create tmp directory on first master
file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
- name: Check if Cilium CLI is installed
ansible.builtin.command: cilium version
register: cilium_cli_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check for Cilium CLI version in command output
set_fact:
installed_cli_version: >-
{{
cilium_cli_installed.stdout_lines
| join(' ')
| regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
when: cilium_cli_installed.rc == 0
- name: Get latest stable Cilium CLI version file
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt"
dest: "/tmp/k3s/cilium-cli-stable.txt"
owner: root
group: root
mode: 0755
- name: Read Cilium CLI stable version from file
ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
register: cli_ver
changed_when: false
- name: Log installed Cilium CLI version
ansible.builtin.debug:
msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"
- name: Log latest stable Cilium CLI version
ansible.builtin.debug:
msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"
- name: Determine if Cilium CLI needs installation or update
set_fact:
cilium_cli_needs_update: >-
{{
cilium_cli_installed.rc != 0 or
(cilium_cli_installed.rc == 0 and
installed_cli_version != cli_ver.stdout)
}}
- name: Install or update Cilium CLI
when: cilium_cli_needs_update
block:
- name: Set architecture variable
ansible.builtin.set_fact:
cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
- name: Download Cilium CLI and checksum
ansible.builtin.get_url:
url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
dest: "/tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}"
owner: root
group: root
mode: 0755
loop:
- ".tar.gz"
- ".tar.gz.sha256sum"
vars:
cilium_base_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}"
- name: Verify the downloaded tarball
ansible.builtin.shell: |
cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
args:
executable: /bin/bash
changed_when: false
- name: Extract Cilium CLI to /usr/local/bin
ansible.builtin.unarchive:
src: "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
dest: /usr/local/bin
remote_src: true
- name: Remove downloaded tarball and checksum file
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum"
- name: Wait for connectivity to kube VIP
ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
register: ping_result
until: ping_result.rc == 0
retries: 21
delay: 1
ignore_errors: true
changed_when: false
- name: Fail if kube VIP not reachable
ansible.builtin.fail:
msg: "API endpoint {{ apiserver_endpoint }} is not reachable"
when: ping_result.rc != 0
- name: Test for existing Cilium install
ansible.builtin.command: k3s kubectl -n kube-system get daemonsets cilium
register: cilium_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check existing Cilium install
when: cilium_installed.rc == 0
block:
- name: Check Cilium version
ansible.builtin.command: cilium version
register: cilium_version
failed_when: false
changed_when: false
ignore_errors: true
- name: Parse installed Cilium version
set_fact:
installed_cilium_version: >-
{{
cilium_version.stdout_lines
| join(' ')
| regex_findall('cilium image.+(\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
- name: Determine if Cilium needs update
set_fact:
cilium_needs_update: >-
{{ 'v' + installed_cilium_version != cilium_tag }}
- name: Log result
ansible.builtin.debug:
msg: >
Installed Cilium version: {{ installed_cilium_version }},
Target Cilium version: {{ cilium_tag }},
Update needed: {{ cilium_needs_update }}
- name: Install Cilium
ansible.builtin.command: >-
{% if cilium_installed.rc != 0 %}
cilium install
{% else %}
cilium upgrade
{% endif %}
--version "{{ cilium_tag }}"
--helm-set operator.replicas="1"
{{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
--helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
{% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
--helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
{% endif %}
--helm-set k8sServiceHost="127.0.0.1"
--helm-set k8sServicePort="6444"
--helm-set routingMode={{ cilium_mode | default("native") }}
--helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
--helm-set kubeProxyReplacement={{ kube_proxy_replacement | default("true") }}
--helm-set bpf.masquerade={{ enable_bpf_masquerade | default("true") }}
--helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
--helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
{% if kube_proxy_replacement is not false %}
--helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm | default("maglev") }}
--helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }}
{% endif %}
environment:
KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
register: cilium_install_result
changed_when: cilium_install_result.rc == 0
when: cilium_installed.rc != 0 or cilium_needs_update
- name: Wait for Cilium resources
command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace=kube-system
--selector='k8s-app=cilium'
--for=condition=Ready
{% else %}
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace=kube-system
--for=condition=Available
{% endif %}
--timeout=30s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- {name: cilium-operator, type: deployment}
- {name: cilium, type: daemonset, selector: 'k8s-app=cilium'}
- {name: hubble-relay, type: deployment, check_hubble: true}
- {name: hubble-ui, type: deployment, check_hubble: true}
loop_control:
label: "{{ item.type }}/{{ item.name }}"
when: >-
not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)
- name: Configure Cilium BGP
when: cilium_bgp
block:
- name: Copy BGP manifests to first master
ansible.builtin.template:
src: "cilium.crs.j2"
dest: /tmp/k3s/cilium-bgp.yaml
owner: root
group: root
mode: 0755
- name: Apply BGP manifests
ansible.builtin.command:
cmd: kubectl apply -f /tmp/k3s/cilium-bgp.yaml
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'is invalid' in apply_cr.stderr"
ignore_errors: true
- name: Print error message if BGP manifests application fails
ansible.builtin.debug:
msg: "{{ apply_cr.stderr }}"
when: "'is invalid' in apply_cr.stderr"
- name: Test for BGP config resources
ansible.builtin.command: "{{ item }}"
loop:
- k3s kubectl get CiliumBGPPeeringPolicy.cilium.io
- k3s kubectl get CiliumLoadBalancerIPPool.cilium.io
changed_when: false
loop_control:
label: "{{ item }}"

View File

@@ -2,17 +2,12 @@
- name: Deploy calico
include_tasks: calico.yml
tags: calico
when: calico_iface is defined and cilium_iface is not defined
- name: Deploy cilium
include_tasks: cilium.yml
tags: cilium
when: cilium_iface is defined
when: calico_iface is defined
- name: Deploy metallb pool
include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
when: kube_vip_lb_ip_range is not defined
- name: Remove tmp directory used for manifests
file:

View File

@@ -83,23 +83,9 @@
loop_control:
label: "{{ item.description }}"
- name: Set metallb webhook service name
set_fact:
metallb_webhook_service_name: >-
{{
(
(metal_lb_controller_tag_version | regex_replace('^v', ''))
is
version('0.14.4', '<', version_type='semver')
) | ternary(
'webhook-service',
'metallb-webhook-service'
)
}}
- name: Test metallb-system webhook-service endpoint
command: >-
k3s kubectl -n metallb-system get endpoints {{ metallb_webhook_service_name }}
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
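
The removed ternary handled MetalLB's webhook service rename: versions before v0.14.4 expose `webhook-service`, later ones `metallb-webhook-service`. A version-agnostic spot check against a running cluster:

```bash
# Try the old name first, then fall back to the new one.
k3s kubectl -n metallb-system get endpoints webhook-service \
  || k3s kubectl -n metallb-system get endpoints metallb-webhook-service
```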

View File

@@ -10,7 +10,7 @@ spec:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: {{ calico_blockSize | default('26') }}
cidr: {{ cluster_cidr | default('10.52.0.0/16') }}
cidr: {{ calico_cidr | default('10.52.0.0/16') }}
encapsulation: {{ calico_encapsulation | default('VXLANCrossSubnet') }}
natOutgoing: {{ calico_natOutgoing | default('Enabled') }}
nodeSelector: {{ calico_nodeSelector | default('all()') }}

View File

@@ -1,34 +0,0 @@
apiVersion: "cilium.io/v2alpha1"
kind: CiliumBGPPeeringPolicy
metadata:
name: 01-bgp-peering-policy
spec: # CiliumBGPPeeringPolicySpec
virtualRouters: # []CiliumBGPVirtualRouter
- localASN: {{ cilium_bgp_my_asn }}
exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
neighbors: # []CiliumBGPNeighbor
- peerAddress: '{{ cilium_bgp_peer_address + "/32"}}'
peerASN: {{ cilium_bgp_peer_asn }}
eBGPMultihopTTL: 10
connectRetryTimeSeconds: 120
holdTimeSeconds: 90
keepAliveTimeSeconds: 30
gracefulRestart:
enabled: true
restartTimeSeconds: 120
serviceSelector:
matchExpressions:
- {key: somekey, operator: NotIn, values: ['never-used-value']}
---
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
name: "01-lb-pool"
spec:
blocks:
{% if "/" in cilium_bgp_lb_cidr %}
- cidr: {{ cilium_bgp_lb_cidr }}
{% else %}
- start: {{ cilium_bgp_lb_cidr.split('-')[0] }}
stop: {{ cilium_bgp_lb_cidr.split('-')[1] }}
{% endif %}

View File

@@ -2,5 +2,4 @@
- name: Reboot server
become: true
reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot server

View File

@@ -1,5 +1,4 @@
---
- name: Reboot
reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot

View File

@@ -1,27 +1,7 @@
---
- name: Test for cmdline path
stat:
path: /boot/firmware/cmdline.txt
register: boot_cmdline_path
failed_when: false
changed_when: false
- name: Set cmdline path based on Debian version and command result
set_fact:
cmdline_path: >-
{{
(
boot_cmdline_path.stat.exists and
ansible_facts.lsb.description | default('') is match('Debian.*(?!(bookworm|sid))')
) | ternary(
'/boot/firmware/cmdline.txt',
'/boot/cmdline.txt'
)
}}
- name: Activating cgroup support
lineinfile:
path: "{{ cmdline_path }}"
path: /boot/cmdline.txt
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
backrefs: true
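
Whichever cmdline path is used, the change only takes effect after a reboot; a quick way to confirm the flags reached the running kernel:

```bash
# Should print the three cgroup flags added by the lineinfile task.
grep -oE 'cgroup_(enable=[a-z]+|memory=1)' /proc/cmdline
```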

View File

@@ -1,13 +1,4 @@
---
- name: Pre tasks
hosts: all
pre_tasks:
- name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible)
assert:
that: "ansible_version.full is version_compare('2.11', '>=')"
msg: >
"Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/"
- name: Prepare Proxmox cluster
hosts: proxmox
gather_facts: true