Compare commits

..

59 Commits

Author SHA1 Message Date
Timothy Stewart
ca79207dc9 chore(ci): triggering build 2023-12-09 22:22:19 -06:00
Timothy Stewart
af7f41379b fix(CI): testing macos-13 2023-11-01 12:05:58 -05:00
Timothy Stewart
69c8d28c36 fix(CI): testing macos-latest 2023-11-01 11:29:32 -05:00
Timothy Stewart
723f07caf1 fix(CI): testing ubuntu 2023-11-01 11:24:06 -05:00
Timothy Stewart
9e9b862334 fix(CI): Remove vagrant box cache 2023-10-30 16:00:55 -05:00
Timothy Stewart
df99f988c5 fix(CI): Break up workflows and use templates 2023-10-29 15:29:25 -05:00
Timothy Stewart
71821b2be3 fix(CI): Break up workflows and use templates 2023-10-29 15:22:05 -05:00
Timothy Stewart
4066ca7b6c fix(CI): Break up workflows and use templates 2023-10-29 15:03:05 -05:00
Timothy Stewart
6ca0dcecae fix(CI): Break up workflows and use templates 2023-10-29 15:00:37 -05:00
Timothy Stewart
9593ae69ef fix(CI): Break up workflows and use templates 2023-10-29 14:57:58 -05:00
Timothy Stewart
926b245281 fix(CI): Break up workflows and use templates 2023-10-29 14:57:10 -05:00
Timothy Stewart
f89eb5ec07 fix(CI): Break up workflows and use templates 2023-10-29 14:55:40 -05:00
Timothy Stewart
d767e6add0 fix(CI): Break up workflows and use templates 2023-10-29 14:40:50 -05:00
Timothy Stewart
0cb638faf1 fix(CI): Break up workflows and use templates 2023-10-29 14:39:50 -05:00
Timothy Stewart
0c3d5e9a29 fix(CI): Break up workflows and use templates 2023-10-29 14:37:19 -05:00
Timothy Stewart
841e924449 fix(CI): Break up workflows and use templates 2023-10-29 14:32:27 -05:00
Timothy Stewart
22c1bccd13 fix(CI): Break up workflows and use templates 2023-10-29 14:30:22 -05:00
Timothy Stewart
26aa538e5f fix(CI): Break up workflows and use templates 2023-10-29 14:28:36 -05:00
Timothy Stewart
06d4773e42 fix(CI): Break up workflows and use templates 2023-10-29 14:27:51 -05:00
Timothy Stewart
c6a20c7f14 fix(CI): Break up workflows and use templates 2023-10-29 14:27:20 -05:00
Timothy Stewart
856015d127 fix(CI): Break up workflows and use templates 2023-10-29 14:26:53 -05:00
Timothy Stewart
8637027635 fix(CI): Break up workflows and use templates 2023-10-29 14:19:14 -05:00
Timothy Stewart
207b8f6f88 fix(CI): Break up workflows and use templates 2023-10-29 14:16:09 -05:00
Timothy Stewart
4d6dc254f3 fix(CI): Break up workflows and use templates 2023-10-29 14:11:48 -05:00
Timothy Stewart
f54f180997 fix(CI): Break up workflows and use templates 2023-10-29 14:10:04 -05:00
Timothy Stewart
c9a96de15b fix(CI): Break up workflows and use templates 2023-10-29 14:04:29 -05:00
Timothy Stewart
7404865bd7 fix(CI): Break up workflows and use templates 2023-10-29 14:01:43 -05:00
Timothy Stewart
015e91bca9 fix(CI): Break up workflows and use templates 2023-10-29 13:58:32 -05:00
Timothy Stewart
4450075425 fix(CI): Break up workflows and use templates 2023-10-29 13:56:11 -05:00
Timothy Stewart
fc88777e16 fix(CI): Break up workflows and use templates 2023-10-29 13:55:34 -05:00
Timothy Stewart
2884d54e6b fix(CI): Break up workflows and use templates 2023-10-29 13:55:09 -05:00
Timothy Stewart
0874e56045 fix(CI): Break up workflows and use templates 2023-10-29 13:53:58 -05:00
Timothy Stewart
4bb58181e8 fix(CI): Break up workflows and use templates 2023-10-29 13:47:52 -05:00
Timothy Stewart
f52a556109 fix(CI): Break up workflows and use templates 2023-10-29 13:44:22 -05:00
Timothy Stewart
90fe88379c fix(CI): Break up workflows and use templates 2023-10-29 13:43:22 -05:00
Timothy Stewart
3ff7547f8c fix(CI): Break up workflows and use templates 2023-10-29 13:42:08 -05:00
Timothy Stewart
edd2ba29ce fix(CI): Break up workflows and use templates 2023-10-29 13:41:31 -05:00
Timothy Stewart
4174e81e68 fix(CI): Break up workflows and use templates 2023-10-29 13:40:45 -05:00
Timothy Stewart
8c8ac8b942 fix(CI): Break up workflows and use templates 2023-10-29 13:40:22 -05:00
Timothy Stewart
eb10968a75 fix(CI): Break up workflows and use templates 2023-10-29 13:38:40 -05:00
Timothy Stewart
96b2d243ea fix(CI): Break up workflows and use templates 2023-10-29 13:38:09 -05:00
Timothy Stewart
2490fecb69 fix(CI): Break up workflows and use templates 2023-10-29 13:37:25 -05:00
Timothy Stewart
80fc191c20 fix(CI): Break up workflows and use templates 2023-10-29 13:36:36 -05:00
Timothy Stewart
bc6aa32198 fix(CI): Break up workflows and use templates 2023-10-29 13:35:35 -05:00
Timothy Stewart
85f7ec6889 fix(CI): Break up workflows and use templates 2023-10-29 13:33:44 -05:00
Timothy Stewart
d90fde7b3b fix(CI): Break up workflows and use templates 2023-10-29 13:33:16 -05:00
Timothy Stewart
756c140bee fix(CI): Break up workflows and use templates 2023-10-29 13:32:34 -05:00
Timothy Stewart
85c1826985 fix(CI): Break up workflows and use templates 2023-10-29 13:31:34 -05:00
Timothy Stewart
ad672b3965 fix(CI): Break up workflows and use templates 2023-10-29 13:30:20 -05:00
Timothy Stewart
f19ff12057 fix(CI): Break up workflows and use templates 2023-10-29 13:29:28 -05:00
Timothy Stewart
173e104663 fix(CI): Break up workflows and use templates 2023-10-29 13:27:13 -05:00
Timothy Stewart
1e06289dca fix(CI): Break up workflows and use templates 2023-10-29 13:26:35 -05:00
Timothy Stewart
c2a3b9f56f fix(CI): Break up workflows and use templates 2023-10-29 13:20:32 -05:00
Timothy Stewart
14aa2f8383 fix(CI): Break up workflows and use templates 2023-10-29 13:20:01 -05:00
Timothy Stewart
fb6d94362c fix(CI): Break up workflows and use templates 2023-10-29 13:18:09 -05:00
Timothy Stewart
9483f18ae2 fix(CI): Break up workflows and use templates 2023-10-29 13:16:01 -05:00
Timothy Stewart
2b4ef14b49 fix(CI): Break up workflows and use templates 2023-10-29 13:15:25 -05:00
Timothy Stewart
32b9bfa44f fix(CI): Break up workflows and use templates 2023-10-29 13:11:55 -05:00
Timothy Stewart
a7d4f88b79 fix(CI): Break up workflows and use templates 2023-10-29 13:09:41 -05:00
51 changed files with 429 additions and 1250 deletions

View File

@@ -13,9 +13,5 @@ exclude_paths:
- 'molecule/**/prepare.yml'
- 'molecule/**/reset.yml'
# The file was generated by galaxy ansible - don't mess with it.
- 'galaxy.yml'
skip_list:
- 'fqcn-builtins'
- var-naming[no-role-prefix]

View File

@@ -37,11 +37,6 @@ systemd_dir: ""
flannel_iface: ""
#calico_iface: ""
calico_ebpf: ""
calico_cidr: ""
calico_tag: ""
apiserver_endpoint: ""
k3s_token: "NA"
@@ -51,9 +46,6 @@ extra_agent_args: ""
kube_vip_tag_version: ""
kube_vip_cloud_provider_tag_version: ""
kube_vip_lb_ip_range: ""
metal_lb_speaker_tag_version: ""
metal_lb_controller_tag_version: ""

View File

@@ -9,18 +9,3 @@ updates:
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]

View File

@@ -1,42 +0,0 @@
---
name: "Cache"
on:
workflow_call:
jobs:
molecule:
name: cache
runs-on: self-hosted
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Cache Vagrant boxes
id: cache-vagrant
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
lookup-only: true #if it exists, we don't need to restore and can skip the next step
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
if: steps.cache-vagrant.outputs.cache-hit != 'true' # only run if false since this is just a cache step
run: |
./.github/download-boxes.sh
vagrant box list

View File

@@ -2,26 +2,20 @@
name: "CI"
on:
pull_request:
types:
- opened
- synchronize
push:
branches:
- master
paths-ignore:
- '**/.gitignore'
- '**/FUNDING.yml'
- '**/host.ini'
- '**/*.md'
- '**/.editorconfig'
- '**/ansible.example.cfg'
- '**/deploy.sh'
- '**/LICENSE'
- '**/reboot.sh'
- '**/reset.sh'
- '**/README.md'
jobs:
pre:
uses: ./.github/workflows/cache.yml
lint:
uses: ./.github/workflows/lint.yml
needs: [pre]
test:
uses: ./.github/workflows/test.yml
needs: [pre, lint]
test-default:
uses: ./.github/workflows/test-default.yml
needs: [lint]
test-ipv6:
uses: ./.github/workflows/test-ipv6.yml
needs: [lint, test-default]
test-single-node:
uses: ./.github/workflows/test-single-node.yml
needs: [lint, test-default, test-ipv6]

View File

@@ -5,7 +5,7 @@ on:
jobs:
pre-commit-ci:
name: Pre-Commit
runs-on: self-hosted
runs-on: ubuntu-22.04
env:
PYTHON_VERSION: "3.11"
@@ -16,16 +16,26 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Restore Ansible cache
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Ansible
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.ansible/collections
key: ansible-${{ hashFiles('collections/requirements.yml') }}
key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
restore-keys: |
${{ runner.os }}-ansible-
- name: Install dependencies
run: |
@@ -37,17 +47,21 @@ jobs:
python3 -m pip install -r requirements.txt
echo "::endgroup::"
echo "::group::Install Ansible role requirements from collections/requirements.yml"
ansible-galaxy install -r collections/requirements.yml
echo "::endgroup::"
- name: Run pre-commit
uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
ensure-pinned-actions:
name: Ensure SHA Pinned Actions
runs-on: self-hosted
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@f32435541e24cd6a4700a7f52bb2ec59e80603b1 # 2.0.1
with:
allowlist: |
aws-actions/

80
.github/workflows/test-default.yml vendored Normal file
View File

@@ -0,0 +1,80 @@
---
name: Molecule Default
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- default
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

80
.github/workflows/test-ipv6.yml vendored Normal file
View File

@@ -0,0 +1,80 @@
---
name: Molecule IPv6
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- ipv6
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

80
.github/workflows/test-single-node.yml vendored Normal file
View File

@@ -0,0 +1,80 @@
---
name: Molecule Single Node
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- single_node
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

View File

@@ -1,126 +0,0 @@
---
name: Test
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: self-hosted
strategy:
matrix:
scenario:
- default
- ipv6
- single_node
- calico
- cilium
- kube-vip
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf || true
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Restore vagrant Boxes cache
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
path: ~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
fail-on-cache-miss: true
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 120
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf || true
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # 4.3.0
with:
name: logs
path: |
${{ runner.temp }}/logs
overwrite: true

1
.gitignore vendored
View File

@@ -1,4 +1,3 @@
.env/
*.log
ansible.cfg
kubeconfig

View File

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.33.0
rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: v6.22.2
rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.6
rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
hooks:
- id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.4
rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: 0.6.4
rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks:
- id: fix-smartquotes

View File

@@ -6,6 +6,4 @@ rules:
max: 120
level: warning
truthy:
allowed-values: ['true', 'false']
ignore:
- galaxy.yml
allowed-values: ['true', 'false', 'yes', 'no']

View File

@@ -118,28 +118,6 @@ You can find more information about it [here](molecule/README.md).
This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
## 🌌 Ansible Galaxy
This collection can now be used in larger ansible projects.
Instructions:
- create or modify a file `collections/requirements.yml` in your project
```yml
collections:
- name: ansible.utils
- name: community.general
- name: ansible.posix
- name: kubernetes.core
- name: https://github.com/techno-tim/k3s-ansible.git
type: git
version: master
```
- install via `ansible-galaxy collection install -r ./collections/requirements.yml`
- every role is now available via the prefix `techno_tim.k3s_ansible.` e.g. `techno_tim.k3s_ansible.lxc`
## Thanks 🤝
This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:

View File

@@ -1,81 +0,0 @@
### REQUIRED
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
# underscores or numbers and cannot contain consecutive underscores
namespace: techno_tim
# The name of the collection. Has the same character restrictions as 'namespace'
name: k3s_ansible
# The version of the collection. Must be compatible with semantic versioning
version: 1.0.0
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
# @nicks:irc/im.site#channel'
authors:
- your name <example@domain.com>
### OPTIONAL but strongly recommended
# A short summary description of the collection
description: >
The easiest way to bootstrap a self-hosted High Availability Kubernetes
cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB,
and more.
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
license:
- Apache-2.0
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
# requirements as 'namespace' and 'name'
tags:
- etcd
- high-availability
- k8s
- k3s
- k3s-cluster
- kube-vip
- kubernetes
- metallb
- rancher
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies:
ansible.utils: '*'
ansible.posix: '*'
community.general: '*'
kubernetes.core: '*'
# The URL of the originating SCM repository
repository: https://github.com/techno-tim/k3s-ansible
# The URL to any online docs
documentation: https://github.com/techno-tim/k3s-ansible
# The URL to the homepage of the collection/project
homepage: https://www.youtube.com/watch?v=CbkEWcUZ7zM
# The URL to the collection issue tracker
issues: https://github.com/techno-tim/k3s-ansible/issues
# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
# and '.git' are always filtered. Mutually exclusive with 'manifest'
build_ignore: []
# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
# list of MANIFEST.in style
# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
# with 'build_ignore'
# manifest: null

View File

@@ -1,5 +1,5 @@
---
k3s_version: v1.29.0+k3s1
k3s_version: v1.25.12+k3s1
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -10,30 +10,6 @@ system_timezone: "Your/Timezone"
# interface which will be used for flannel
flannel_iface: "eth0"
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0"
calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: "v3.27.0" # calico version tag
# uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0"
cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.14.6" # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui
# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: "10.52.0.0/16"
# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false
# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
cilium_bgp_my_asn: "64513"
cilium_bgp_peer_asn: "64512"
cilium_bgp_peer_address: "192.168.30.1"
cilium_bgp_lb_cidr: "192.168.31.0/24" # cidr for cilium loadbalancer ipam
# apiserver_endpoint is virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222"
@@ -44,42 +20,28 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override
# it for each of your hosts, though.
k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}"
k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
# these arguments are recommended for servers as well as agents:
extra_args: >-
{{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
--flannel-iface={{ flannel_iface }}
--node-ip={{ k3s_node_ip }}
# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
# the contents of the if block is also required if using calico or cilium
extra_server_args: >-
{{ extra_args }}
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
{% if calico_iface is defined or cilium_iface is defined %}
--flannel-backend=none
--disable-network-policy
--cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
{% endif %}
--tls-san {{ apiserver_endpoint }}
--disable servicelb
--disable traefik
extra_agent_args: >-
{{ extra_args }}
# image tag for kube-vip
kube_vip_tag_version: "v0.6.4"
# tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main"
# kube-vip ip range for load balancer
# (uncomment to use kube-vip for services instead of MetalLB)
# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
kube_vip_tag_version: "v0.5.12"
# metallb type frr or native
metal_lb_type: "native"
@@ -93,8 +55,8 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb
metal_lb_speaker_tag_version: "v0.13.12"
metal_lb_controller_tag_version: "v0.13.12"
metal_lb_speaker_tag_version: "v0.13.9"
metal_lb_controller_tag_version: "v0.13.9"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
@@ -104,9 +66,9 @@ metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
# containers are significantly more resource efficient compared to full VMs.
# containers are significantly more resource efficient compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false
# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,

View File

@@ -13,12 +13,6 @@ We have these scenarios:
To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
- **single_node**:
Very similar to the default scenario, but uses only a single node for all cluster functionality.
- **calico**:
The same as single node, but uses calico cni instead of flannel.
- **cilium**:
The same as single node, but uses cilium cni instead of flannel.
- **kube-vip**
The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB
## How to execute

View File

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.62
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -1,16 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
calico_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.224"
metal_lb_ip_range: "192.168.30.100-192.168.30.109"

View File

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.63
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -1,16 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
cilium_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
metal_lb_ip_range: "192.168.30.110-192.168.30.119"

View File

@@ -7,7 +7,7 @@ platforms:
- name: control1
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -22,8 +22,8 @@ platforms:
ssh.password: "vagrant"
- name: control2
box: generic/debian12
memory: 1024
box: generic/debian11
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -34,7 +34,7 @@ platforms:
- name: control3
box: generic/rocky9
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -45,7 +45,7 @@ platforms:
- name: node1
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -61,7 +61,7 @@ platforms:
- name: node2
box: generic/rocky9
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -72,8 +72,6 @@ platforms:
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -84,6 +82,7 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -17,6 +17,6 @@
# and security needs.
ansible.builtin.systemd:
name: firewalld
enabled: false
enabled: no
state: stopped
become: true

View File

@@ -6,7 +6,7 @@ driver:
platforms:
- name: control1
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -22,7 +22,7 @@ platforms:
- name: control2
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -38,7 +38,7 @@ platforms:
- name: node1
box: generic/ubuntu2204
memory: 1024
memory: 2048
cpus: 2
groups:
- k3s_cluster
@@ -53,8 +53,6 @@ platforms:
ssh.password: "vagrant"
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -65,6 +63,7 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.62
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -1,17 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
# Use kube-vip instead of MetalLB
kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"

View File

@@ -35,7 +35,7 @@
- name: Assert that the nginx welcome page is available
ansible.builtin.uri:
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
return_content: true
return_content: yes
register: result
failed_when: "'Welcome to nginx!' not in result.content"
vars:

View File

@@ -21,8 +21,6 @@ platforms:
ip: 192.168.30.50
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
@@ -33,6 +31,7 @@ provisioner:
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax

View File

@@ -1,7 +1,7 @@
---
- name: Reboot k3s_cluster
hosts: k3s_cluster
gather_facts: true
gather_facts: yes
tasks:
- name: Reboot the nodes (and Wait upto 5 mins max)
become: true

View File

@@ -1,10 +1,10 @@
ansible-core>=2.16.2
ansible-core>=2.13.5
jmespath>=1.0.1
jsonpatch>=1.33
kubernetes>=29.0.0
molecule-plugins[vagrant]
molecule>=6.0.3
netaddr>=0.10.1
pre-commit>=3.6.0
pre-commit-hooks>=4.5.0
pyyaml>=6.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
molecule-vagrant>=1.0.0
molecule>=4.0.3
netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0

View File

@@ -4,165 +4,174 @@
#
# pip-compile requirements.in
#
ansible-compat==4.1.11
ansible-compat==3.0.1
# via molecule
ansible-core==2.16.3
ansible-core==2.15.4
# via
# -r requirements.in
# ansible-compat
# molecule
attrs==23.2.0
# via
# jsonschema
# referencing
bracex==2.4
# via wcmatch
cachetools==5.3.2
arrow==1.2.3
# via jinja2-time
attrs==22.1.0
# via jsonschema
binaryornot==0.4.4
# via cookiecutter
cachetools==5.2.0
# via google-auth
certifi==2023.11.17
certifi==2022.9.24
# via
# kubernetes
# requests
cffi==1.16.0
cffi==1.15.1
# via cryptography
cfgv==3.4.0
cfgv==3.3.1
# via pre-commit
charset-normalizer==3.3.2
chardet==5.0.0
# via binaryornot
charset-normalizer==2.1.1
# via requests
click==8.1.7
click==8.1.3
# via
# click-help-colors
# cookiecutter
# molecule
click-help-colors==0.9.4
click-help-colors==0.9.1
# via molecule
cryptography==41.0.7
commonmark==0.9.1
# via rich
cookiecutter==2.1.1
# via molecule
cryptography==38.0.3
# via ansible-core
distlib==0.3.8
distlib==0.3.6
# via virtualenv
distro==1.8.0
# via selinux
enrich==1.2.7
# via molecule
filelock==3.13.1
filelock==3.8.0
# via virtualenv
google-auth==2.26.2
google-auth==2.14.0
# via kubernetes
identify==2.5.33
identify==2.5.8
# via pre-commit
idna==3.6
idna==3.4
# via requests
jinja2==3.1.3
jinja2==3.1.2
# via
# ansible-core
# cookiecutter
# jinja2-time
# molecule
# molecule-vagrant
jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1
# via -r requirements.in
jsonpatch==1.33
# via -r requirements.in
jsonpointer==2.4
jsonpointer==2.3
# via jsonpatch
jsonschema==4.21.1
jsonschema==4.17.0
# via
# ansible-compat
# molecule
jsonschema-specifications==2023.12.1
# via jsonschema
kubernetes==29.0.0
kubernetes==25.3.0
# via -r requirements.in
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.4
markupsafe==2.1.1
# via jinja2
mdurl==0.1.2
# via markdown-it-py
molecule==6.0.3
molecule==4.0.4
# via
# -r requirements.in
# molecule-plugins
molecule-plugins[vagrant]==23.5.0
# molecule-vagrant
molecule-vagrant==1.0.0
# via -r requirements.in
netaddr==0.10.1
netaddr==0.9.0
# via -r requirements.in
nodeenv==1.8.0
nodeenv==1.7.0
# via pre-commit
oauthlib==3.2.2
# via
# kubernetes
# requests-oauthlib
packaging==23.2
# via requests-oauthlib
packaging==21.3
# via
# ansible-compat
# ansible-core
# molecule
platformdirs==4.1.0
platformdirs==2.5.2
# via virtualenv
pluggy==1.3.0
pluggy==1.0.0
# via molecule
pre-commit==3.6.0
pre-commit==2.21.0
# via -r requirements.in
pre-commit-hooks==4.5.0
# via -r requirements.in
pyasn1==0.5.1
pyasn1==0.4.8
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.3.0
pyasn1-modules==0.2.8
# via google-auth
pycparser==2.21
# via cffi
pygments==2.17.2
pygments==2.13.0
# via rich
pyparsing==3.0.9
# via packaging
pyrsistent==0.19.2
# via jsonschema
python-dateutil==2.8.2
# via kubernetes
# via
# arrow
# kubernetes
python-slugify==6.1.2
# via cookiecutter
python-vagrant==1.0.0
# via molecule-plugins
# via molecule-vagrant
pyyaml==6.0.1
# via
# -r requirements.in
# ansible-compat
# ansible-core
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
referencing==0.32.1
# via
# jsonschema
# jsonschema-specifications
requests==2.31.0
requests==2.28.1
# via
# cookiecutter
# kubernetes
# requests-oauthlib
requests-oauthlib==1.3.1
# via kubernetes
resolvelib==1.0.1
resolvelib==0.8.1
# via ansible-core
rich==13.7.0
rich==12.6.0
# via
# enrich
# molecule
rpds-py==0.17.1
# via
# jsonschema
# referencing
rsa==4.9
# via google-auth
ruamel-yaml==0.18.5
ruamel-yaml==0.17.21
# via pre-commit-hooks
ruamel-yaml-clib==0.2.8
# via ruamel-yaml
selinux==0.2.1
# via molecule-vagrant
six==1.16.0
# via
# google-auth
# kubernetes
# python-dateutil
subprocess-tee==0.4.1
# via ansible-compat
urllib3==2.1.0
text-unidecode==1.3
# via python-slugify
urllib3==1.26.12
# via
# kubernetes
# requests
virtualenv==20.25.0
virtualenv==20.16.6
# via pre-commit
wcmatch==8.5
# via molecule
websocket-client==1.7.0
websocket-client==1.4.2
# via kubernetes
# The following packages are considered to be unsafe in a requirements file:

View File

@@ -1,7 +1,7 @@
---
- name: Reset k3s cluster
hosts: k3s_cluster
gather_facts: true
gather_facts: yes
roles:
- role: reset
become: true
@@ -17,7 +17,7 @@
- name: Revert changes to Proxmox cluster
hosts: proxmox
gather_facts: true
become: true
become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}"
roles:
- role: reset_proxmox_lxc

View File

@@ -1,8 +1,8 @@
---
- name: Create k3s-node.service.d directory
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s-node.service.d'
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
@@ -12,7 +12,7 @@
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'

View File

@@ -15,6 +15,6 @@
- name: Enable and check K3s service
systemd:
name: k3s-node
daemon_reload: true
daemon_reload: yes
state: restarted
enabled: true
enabled: yes

View File

@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip cloud provider manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy kubevip configMap manifest to first master
template:
src: "kubevip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

View File

@@ -6,13 +6,6 @@
state: stopped
failed_when: false
# k3s-init won't work if the port is already in use
- name: Stop k3s
systemd:
name: k3s
state: stopped
failed_when: false
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command.
command: systemctl reset-failed k3s-init
@@ -29,12 +22,6 @@
- name: Deploy metallb manifest
include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Deploy kube-vip manifest
include_tasks: kube-vip.yml
tags: kubevip
when: kube_vip_lb_ip_range is defined
- name: Init cluster inside the transient k3s-init service
command:
@@ -42,7 +29,7 @@
-p Restart=on-failure \
--unit=k3s-init \
k3s server {{ server_init_args }}"
creates: "{{ systemd_dir }}/k3s-init.service"
creates: "{{ systemd_dir }}/k3s.service"
- name: Verification
when: not ansible_check_mode
@@ -80,9 +67,9 @@
- name: Enable and check K3s service
systemd:
name: k3s
daemon_reload: true
daemon_reload: yes
state: restarted
enabled: true
enabled: yes
- name: Wait for node-token
wait_for:
@@ -123,7 +110,7 @@
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: "{{ ansible_user_dir }}/.kube/config"
remote_src: true
remote_src: yes
owner: "{{ ansible_user_id }}"
mode: "u=rw,g=,o="

View File

@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubevip
namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
range-global: {{ kube_vip_lb_ip_range | join(',') }}

View File

@@ -43,7 +43,7 @@ spec:
- name: vip_ddns
value: "false"
- name: svc_enable
value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}"
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration

View File

@@ -1,6 +1,6 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 240s
metal_lb_available_timeout: 120s
# Name of the master group
group_name_master: master

View File

@@ -1,114 +0,0 @@
---
- name: Deploy Calico to cluster
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create manifests directory on first master
file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
- name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
dest: "/tmp/k3s/tigera-operator.yaml"
owner: root
group: root
mode: 0755
- name: Copy Calico custom resources manifest to first master
ansible.builtin.template:
src: "calico.crs.j2"
dest: /tmp/k3s/custom-resources.yaml
owner: root
group: root
mode: 0755
- name: Deploy or replace Tigera Operator
block:
- name: Deploy Tigera Operator
ansible.builtin.command:
cmd: kubectl create -f /tmp/k3s/tigera-operator.yaml
register: create_operator
changed_when: "'created' in create_operator.stdout"
failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
rescue:
- name: Replace existing Tigera Operator
ansible.builtin.command:
cmd: kubectl replace -f /tmp/k3s/tigera-operator.yaml
register: replace_operator
changed_when: "'replaced' in replace_operator.stdout"
failed_when: "'Error' in replace_operator.stderr"
- name: Wait for Tigera Operator resources
command: >-
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator'
--for=condition=Available=True
--timeout=7s
register: tigera_result
changed_when: false
until: tigera_result is succeeded
retries: 7
delay: 7
with_items:
- {name: tigera-operator, type: deployment}
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Deploy Calico custom resources
block:
- name: Deploy custom resources for Calico
ansible.builtin.command:
cmd: kubectl create -f /tmp/k3s/custom-resources.yaml
register: create_cr
changed_when: "'created' in create_cr.stdout"
failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
rescue:
- name: Apply new Calico custom resource manifest
ansible.builtin.command:
cmd: kubectl apply -f /tmp/k3s/custom-resources.yaml
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'Error' in apply_cr.stderr"
- name: Wait for Calico system resources to be available
command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace='{{ item.namespace }}'
--selector={{ item.selector }}
--for=condition=Ready
{% else %}
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='{{ item.namespace }}'
--for=condition=Available
{% endif %}
--timeout=7s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- {name: calico-typha, type: deployment, namespace: calico-system}
- {name: calico-kube-controllers, type: deployment, namespace: calico-system}
- {name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system}
- {name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system}
- {name: calico-apiserver, type: deployment, namespace: calico-apiserver}
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Patch Felix configuration for eBPF mode
ansible.builtin.command:
cmd: >
kubectl patch felixconfiguration default
--type='merge'
--patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
register: patch_result
changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
failed_when: "'Error' in patch_result.stderr"
when: calico_ebpf

View File

@@ -1,253 +0,0 @@
---
- name: Prepare Cilium CLI on first master and deploy CNI
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create tmp directory on first master
file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
- name: Check if Cilium CLI is installed
ansible.builtin.command: cilium version
register: cilium_cli_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check for Cilium CLI version in command output
set_fact:
installed_cli_version: >-
{{
cilium_cli_installed.stdout_lines
| join(' ')
| regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
when: cilium_cli_installed.rc == 0
- name: Get latest stable Cilium CLI version file
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt"
dest: "/tmp/k3s/cilium-cli-stable.txt"
owner: root
group: root
mode: 0755
- name: Read Cilium CLI stable version from file
ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
register: cli_ver
changed_when: false
- name: Log installed Cilium CLI version
ansible.builtin.debug:
msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"
- name: Log latest stable Cilium CLI version
ansible.builtin.debug:
msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"
- name: Determine if Cilium CLI needs installation or update
set_fact:
cilium_cli_needs_update: >-
{{
cilium_cli_installed.rc != 0 or
(cilium_cli_installed.rc == 0 and
installed_cli_version != cli_ver.stdout)
}}
- name: Install or update Cilium CLI
when: cilium_cli_needs_update
block:
- name: Set architecture variable
ansible.builtin.set_fact:
cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
- name: Download Cilium CLI and checksum
ansible.builtin.get_url:
url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
dest: "/tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}"
owner: root
group: root
mode: 0755
loop:
- ".tar.gz"
- ".tar.gz.sha256sum"
vars:
cilium_base_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}"
- name: Verify the downloaded tarball
ansible.builtin.shell: |
cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
args:
executable: /bin/bash
changed_when: false
- name: Extract Cilium CLI to /usr/local/bin
ansible.builtin.unarchive:
src: "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
dest: /usr/local/bin
remote_src: true
- name: Remove downloaded tarball and checksum file
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum"
- name: Wait for connectivity to kube VIP
ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
register: ping_result
until: ping_result.rc == 0
retries: 21
delay: 1
ignore_errors: true
changed_when: false
- name: Fail if kube VIP not reachable
ansible.builtin.fail:
msg: "API endpoint {{ apiserver_endpoint }} is not reachable"
when: ping_result.rc != 0
- name: Test for existing Cilium install
ansible.builtin.command: k3s kubectl -n kube-system get daemonsets cilium
register: cilium_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check existing Cilium install
when: cilium_installed.rc == 0
block:
- name: Check Cilium version
ansible.builtin.command: cilium version
register: cilium_version
failed_when: false
changed_when: false
ignore_errors: true
- name: Parse installed Cilium version
set_fact:
installed_cilium_version: >-
{{
cilium_version.stdout_lines
| join(' ')
| regex_findall('cilium image.+(\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
- name: Determine if Cilium needs update
set_fact:
cilium_needs_update: >-
{{ 'v' + installed_cilium_version != cilium_tag }}
- name: Log result
ansible.builtin.debug:
msg: >
Installed Cilium version: {{ installed_cilium_version }},
Target Cilium version: {{ cilium_tag }},
Update needed: {{ cilium_needs_update }}
- name: Install Cilium
ansible.builtin.command: >-
{% if cilium_installed.rc != 0 %}
cilium install
{% else %}
cilium upgrade
{% endif %}
--version "{{ cilium_tag }}"
--helm-set operator.replicas="1"
{{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
--helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
{% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
--helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
{% endif %}
--helm-set k8sServiceHost={{ apiserver_endpoint }}
--helm-set k8sServicePort="6443"
--helm-set routingMode={{ cilium_mode | default("native") }}
--helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
--helm-set kubeProxyReplacement={{ kube_proxy_replacement | default("true") }}
--helm-set bpf.masquerade={{ enable_bpf_masquerade | default("true") }}
--helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
--helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
{% if kube_proxy_replacement is not false %}
--helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm | default("maglev") }}
--helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }}
{% endif %}
environment:
KUBECONFIG: /home/{{ ansible_user }}/.kube/config
register: cilium_install_result
changed_when: cilium_install_result.rc == 0
when: cilium_installed.rc != 0 or cilium_needs_update
- name: Wait for Cilium resources
command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace=kube-system
--selector='k8s-app=cilium'
--for=condition=Ready
{% else %}
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace=kube-system
--for=condition=Available
{% endif %}
--timeout=7s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- {name: cilium-operator, type: deployment}
- {name: cilium, type: daemonset, selector: 'k8s-app=cilium'}
- {name: hubble-relay, type: deployment, check_hubble: true}
- {name: hubble-ui, type: deployment, check_hubble: true}
loop_control:
label: "{{ item.type }}/{{ item.name }}"
when: >-
not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)
- name: Configure Cilium BGP
when: cilium_bgp
block:
- name: Copy BGP manifests to first master
ansible.builtin.template:
src: "cilium.crs.j2"
dest: /tmp/k3s/cilium-bgp.yaml
owner: root
group: root
mode: 0755
- name: Apply BGP manifests
ansible.builtin.command:
cmd: kubectl apply -f /tmp/k3s/cilium-bgp.yaml
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'is invalid' in apply_cr.stderr"
ignore_errors: true
- name: Print error message if BGP manifests application fails
ansible.builtin.debug:
msg: "{{ apply_cr.stderr }}"
when: "'is invalid' in apply_cr.stderr"
- name: Test for BGP config resources
ansible.builtin.command: "{{ item }}"
loop:
- k3s kubectl get CiliumBGPPeeringPolicy.cilium.io
- k3s kubectl get CiliumLoadBalancerIPPool.cilium.io
changed_when: false
loop_control:
label: "{{ item }}"

View File

@@ -1,18 +1,7 @@
---
- name: Deploy calico
include_tasks: calico.yml
tags: calico
when: calico_iface is defined and cilium_iface is not defined
- name: Deploy cilium
include_tasks: cilium.yml
tags: cilium
when: cilium_iface is defined
- name: Deploy metallb pool
include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Remove tmp directory used for manifests
file:

View File

@@ -8,27 +8,6 @@
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Delete outdated metallb replicas
shell: |-
set -o pipefail
REPLICAS=$(k3s kubectl --namespace='metallb-system' get replicasets \
-l 'component=controller,app=metallb' \
-o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
if [ -n "${REPLICAS_SETS}" ] ; then
for REPLICAS in "${REPLICAS_SETS}"
do
k3s kubectl --namespace='metallb-system' \
delete rs "${REPLICAS}"
done
fi
args:
executable: /bin/bash
changed_when: false
run_once: true
with_items: "{{ groups[group_name_master | default('master')] }}"
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"

View File

@@ -1,41 +0,0 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
# Configures Calico networking.
calicoNetwork:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: {{ calico_blockSize | default('26') }}
cidr: {{ cluster_cidr | default('10.52.0.0/16') }}
encapsulation: {{ calico_encapsulation | default('VXLANCrossSubnet') }}
natOutgoing: {{ calico_natOutgoing | default('Enabled') }}
nodeSelector: {{ calico_nodeSelector | default('all()') }}
nodeAddressAutodetectionV4:
interface: {{ calico_iface }}
linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}
{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kubernetes-services-endpoint
namespace: tigera-operator
data:
KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
KUBERNETES_SERVICE_PORT: '6443'
{% endif %}

View File

@@ -1,29 +0,0 @@
apiVersion: "cilium.io/v2alpha1"
kind: CiliumBGPPeeringPolicy
metadata:
name: 01-bgp-peering-policy
spec: # CiliumBGPPeeringPolicySpec
virtualRouters: # []CiliumBGPVirtualRouter
- localASN: {{ cilium_bgp_my_asn }}
exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
neighbors: # []CiliumBGPNeighbor
- peerAddress: '{{ cilium_bgp_peer_address + "/32"}}'
peerASN: {{ cilium_bgp_peer_asn }}
eBGPMultihopTTL: 10
connectRetryTimeSeconds: 120
holdTimeSeconds: 90
keepAliveTimeSeconds: 30
gracefulRestart:
enabled: true
restartTimeSeconds: 120
serviceSelector:
matchExpressions:
- {key: somekey, operator: NotIn, values: ['never-used-value']}
---
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
name: "01-lb-pool"
spec:
cidrs:
- cidr: "{{ cilium_bgp_lb_cidr }}"

View File

@@ -14,7 +14,7 @@
name: net.ipv4.ip_forward
value: "1"
state: present
reload: true
reload: yes
tags: sysctl
- name: Enable IPv6 forwarding
@@ -22,7 +22,7 @@
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: true
reload: yes
tags: sysctl
- name: Enable IPv6 router advertisements
@@ -30,7 +30,7 @@
name: net.ipv6.conf.all.accept_ra
value: "2"
state: present
reload: true
reload: yes
tags: sysctl
- name: Add br_netfilter to /etc/modules-load.d/
@@ -51,7 +51,7 @@
name: "{{ item }}"
value: "1"
state: present
reload: true
reload: yes
when: ansible_os_family == "RedHat"
loop:
- net.bridge.bridge-nf-call-iptables

View File

@@ -17,27 +17,21 @@
when:
grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm)
- name: Set detected_distribution to Raspbian
set_fact:
detected_distribution: Raspbian
vars:
allowed_descriptions:
- "[Rr]aspbian.*"
- "Debian.*buster"
- "Debian.*bullseye"
- "Debian.*bookworm"
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))
when: >
raspberry_pi|default(false) and
( ansible_facts.lsb.id|default("") == "Raspbian" or
ansible_facts.lsb.description|default("") is match("[Rr]aspbian.*") )
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm)
- name: Set detected_distribution to Raspbian (ARM64 on Debian Buster)
set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bookworm")
- ansible_facts.lsb.description|default("") is match("Debian.*buster")
- name: Set detected_distribution_major_version
set_fact:
@@ -45,6 +39,14 @@
when:
- detected_distribution | default("") == "Raspbian"
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bullseye)
set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
with_first_found:

View File

@@ -2,7 +2,7 @@
- name: Enable cgroup via boot commandline if not already enabled for Rocky
lineinfile:
path: /boot/cmdline.txt
backrefs: true
backrefs: yes
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
notify: reboot

View File

@@ -2,7 +2,7 @@
- name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi
lineinfile:
path: /boot/firmware/cmdline.txt
backrefs: true
backrefs: yes
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
notify: reboot

View File

@@ -3,7 +3,7 @@
systemd:
name: "{{ item }}"
state: stopped
enabled: false
enabled: no
failed_when: false
with_items:
- k3s
@@ -45,22 +45,19 @@
- /var/lib/rancher/k3s
- /var/lib/rancher/
- /var/lib/cni/
- /etc/cni/net.d
- name: Remove K3s http_proxy files
file:
name: "{{ item }}"
state: absent
with_items:
- "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s.service.d"
- "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s-node.service.d"
when: proxy_env is defined
- name: Reload daemon_reload
systemd:
daemon_reload: true
daemon_reload: yes
- name: Remove tmp directory used for manifests
file:

View File

@@ -2,7 +2,7 @@
- name: Prepare Proxmox cluster
hosts: proxmox
gather_facts: true
become: true
become: yes
environment: "{{ proxy_env | default({}) }}"
roles:
- role: proxmox_lxc
@@ -10,7 +10,7 @@
- name: Prepare k3s nodes
hosts: k3s_cluster
gather_facts: true
gather_facts: yes
environment: "{{ proxy_env | default({}) }}"
roles:
- role: lxc
@@ -46,14 +46,3 @@
roles:
- role: k3s_server_post
become: true
- name: Storing kubeconfig in the playbook directory
hosts: master
environment: "{{ proxy_env | default({}) }}"
tasks:
- name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
ansible.builtin.fetch:
src: "{{ ansible_user_dir }}/.kube/config"
dest: ./kubeconfig
flat: true
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']