Compare commits

16 Commits

Author            SHA1        Message                               Date
Techno Tim        394e3342ae  Merge branch 'master' into k3s-1-26   2023-12-11 20:36:11 -06:00
Timothy Stewart   3db77957b1  feat(k3s): Updated to v1.26.11+k3s2   2023-12-09 22:42:35 -06:00
Timothy Stewart   86541ac52a  feat(k3s): Updated to v1.26.7+k3s1    2023-08-17 22:43:18 -05:00
Techno Tim        b81b3962d2  Merge branch 'master' into k3s-1-26   2023-08-14 11:54:46 -05:00
Techno Tim        c738b8d040  Merge branch 'master' into k3s-1-26   2023-07-23 15:03:21 -05:00
Techno Tim        c5eb0a0ad1  Merge branch 'master' into k3s-1-26   2023-07-20 21:34:29 -05:00
Techno Tim        1350fea9bd  Merge branch 'master' into k3s-1-26   2023-04-28 18:31:01 -05:00
Timothy Stewart   49e97d0981  feat(k3s): Updated to v1.26.4+k3s1    2023-04-28 18:29:27 -05:00
Techno Tim        47b08b21d8  Merge branch 'master' into k3s-1-26   2023-04-16 16:49:42 -05:00
Timothy Stewart   0f7a722ac5  feat(k3s): Updated to v1.26.3+k3s1    2023-04-16 15:18:24 -05:00
Techno Tim        39fbf86df6  Merge branch 'master' into k3s-1-26   2023-03-15 11:15:12 -05:00
Timothy Stewart   495bdfdde9  feat(k3s): Updated to v1.26.2+k3s1    2023-03-13 18:56:27 -05:00
Timothy Stewart   08917be44a  Merge branch 'master' into k3s-1-26   2023-03-13 18:56:05 -05:00
Techno Tim        874cc9f1f4  Merge branch 'master' into k3s-1-26   2023-02-13 19:56:42 -06:00
Techno Tim        73b76da984  Merge branch 'master' into k3s-1-26   2023-02-06 22:27:41 -06:00
Timothy Stewart   4502ebd439  feat(k3s): Updated to v1.26.0+k3s2    2023-01-19 22:35:03 -06:00
85 changed files with 492 additions and 2147 deletions

@@ -1,21 +1,17 @@
 ---
-profile: production
 exclude_paths:
   # default paths
-  - .cache/
-  - .github/
-  - test/fixtures/formatting-before/
-  - test/fixtures/formatting-prettier/
+  - '.cache/'
+  - '.github/'
+  - 'test/fixtures/formatting-before/'
+  - 'test/fixtures/formatting-prettier/'
   # The "converge" and "reset" playbooks use import_playbook in
   # conjunction with the "env" lookup plugin, which lets the
   # syntax check of ansible-lint fail.
-  - molecule/**/converge.yml
-  - molecule/**/prepare.yml
-  - molecule/**/reset.yml
-  # The file was generated by galaxy ansible - don't mess with it.
-  - galaxy.yml
+  - 'molecule/**/converge.yml'
+  - 'molecule/**/prepare.yml'
+  - 'molecule/**/reset.yml'
 skip_list:
-  - var-naming[no-role-prefix]
+  - 'fqcn-builtins'

@@ -1,5 +1,5 @@
-<!-- It's a good idea to check this post first for general troubleshooting https://github.com/timothystewart6/k3s-ansible/discussions/19 -->
+<!-- It's a good idea to check this post first for general troubleshooting https://github.com/techno-tim/k3s-ansible/discussions/19 -->
 <!--- Provide a general summary of the issue in the Title above -->
@@ -37,11 +37,6 @@ systemd_dir: ""
 flannel_iface: ""
-#calico_iface: ""
-calico_ebpf: ""
-calico_cidr: ""
-calico_tag: ""
 apiserver_endpoint: ""
 k3s_token: "NA"
@@ -51,9 +46,6 @@ extra_agent_args: ""
 kube_vip_tag_version: ""
-kube_vip_cloud_provider_tag_version: ""
-kube_vip_lb_ip_range: ""
 metal_lb_speaker_tag_version: ""
 metal_lb_controller_tag_version: ""
@@ -82,4 +74,4 @@ node
 ## Possible Solution
 <!--- Not obligatory, but suggest a fix/reason for the bug, -->
-- [ ] I've checked the [General Troubleshooting Guide](https://github.com/timothystewart6/k3s-ansible/discussions/20)
+- [ ] I've checked the [General Troubleshooting Guide](https://github.com/techno-tim/k3s-ansible/discussions/20)

@@ -9,18 +9,3 @@ updates:
   ignore:
     - dependency-name: "*"
       update-types: ["version-update:semver-major"]
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "daily"
-    rebase-strategy: "auto"
-  - package-ecosystem: "docker"
-    directory: "/"
-    schedule:
-      interval: "daily"
-    rebase-strategy: "auto"
-    ignore:
-      - dependency-name: "*"
-        update-types: ["version-update:semver-major"]

@@ -9,17 +9,12 @@ set -euo pipefail
 GIT_ROOT=$(git rev-parse --show-toplevel)
 PROVIDER=virtualbox
-yq --version
-# Define the path to the molecule.yml files
-MOLECULE_YML_PATH="${GIT_ROOT}/molecule/*/molecule.yml"
-# Extract and sort unique boxes from all molecule.yml files
-all_boxes=$(for file in $MOLECULE_YML_PATH; do
-  yq eval '.platforms[].box' "$file"
-done | sort -u)
-echo all_boxes: "$all_boxes"
+# Read all boxes for all platforms from the "molecule.yml" files
+all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
+  yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
+  grep --invert-match --regexp=--- | # Filter out file separators
+  sort |
+  uniq)
 # Read the boxes that are currently present on the system (for the current provider)
 present_boxes=$(

@@ -1,42 +0,0 @@
----
-name: "Cache"
-on:
-  workflow_call:
-jobs:
-  molecule:
-    name: cache
-    runs-on: self-hosted
-    env:
-      PYTHON_VERSION: "3.11"
-    steps:
-      - name: Check out the codebase
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
-      - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0
-        with:
-          python-version: ${{ env.PYTHON_VERSION }}
-          cache: 'pip' # caching pip dependencies
-      - name: Cache Vagrant boxes
-        id: cache-vagrant
-        uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # 4.1.2
-        with:
-          lookup-only: true # if it exists, we don't need to restore and can skip the next step
-          path: |
-            ~/.vagrant.d/boxes
-          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
-          restore-keys: |
-            vagrant-boxes
-      - name: Download Vagrant boxes for all scenarios
-        # To save some cache space, all scenarios share the same cache key.
-        # On the other hand, this means that the cache contents should be
-        # the same across all scenarios. This step ensures that.
-        if: steps.cache-vagrant.outputs.cache-hit != 'true' # only run if false since this is just a cache step
-        run: |
-          ./.github/download-boxes.sh
-          vagrant box list

@@ -2,26 +2,14 @@
name: "CI" name: "CI"
on: on:
pull_request: pull_request:
types: push:
- opened branches:
- synchronize - master
paths-ignore: paths-ignore:
- '**/.gitignore' - '**/README.md'
- '**/FUNDING.yml'
- '**/host.ini'
- '**/*.md'
- '**/.editorconfig'
- '**/ansible.example.cfg'
- '**/deploy.sh'
- '**/LICENSE'
- '**/reboot.sh'
- '**/reset.sh'
jobs: jobs:
pre:
uses: ./.github/workflows/cache.yml
lint: lint:
uses: ./.github/workflows/lint.yml uses: ./.github/workflows/lint.yml
needs: [pre]
test: test:
uses: ./.github/workflows/test.yml uses: ./.github/workflows/test.yml
needs: [pre, lint] needs: [lint]

@@ -5,27 +5,37 @@ on:
 jobs:
   pre-commit-ci:
     name: Pre-Commit
-    runs-on: self-hosted
+    runs-on: ubuntu-latest
     env:
       PYTHON_VERSION: "3.11"
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
+        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0
+        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
-      - name: Restore Ansible cache
-        uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # 4.1.2
+      - name: Cache pip
+        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+      - name: Cache Ansible
+        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
         with:
           path: ~/.ansible/collections
-          key: ansible-${{ hashFiles('collections/requirements.yml') }}
+          key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-ansible-
       - name: Install dependencies
         run: |
@@ -37,17 +47,21 @@ jobs:
           python3 -m pip install -r requirements.txt
           echo "::endgroup::"
-          echo "::group::Install Ansible role requirements from collections/requirements.yml"
-          ansible-galaxy install -r collections/requirements.yml
-          echo "::endgroup::"
       - name: Run pre-commit
-        uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # 3.0.1
+        uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
   ensure-pinned-actions:
     name: Ensure SHA Pinned Actions
-    runs-on: self-hosted
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
+        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
       - name: Ensure SHA pinned actions
-        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@38608ef4fb69adae7f1eac6eeb88e67b7d083bfd # 3.0.16
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
         with:
           allowlist: |
             aws-actions/
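Both sides of this diff follow the convention that this job enforces: third-party actions are pinned to a full commit SHA (immutable) rather than a tag (mutable), with the human-readable version kept as a trailing comment, and the allowlist exempts trusted namespaces. The pattern, shown with a pin that appears in this diff:

```yml
steps:
  - name: Check out the codebase
    uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
```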

@@ -5,51 +5,23 @@ on:
 jobs:
   molecule:
     name: Molecule
-    runs-on: self-hosted
+    runs-on: macos-12
     strategy:
       matrix:
         scenario:
           - default
-          # - ipv6
+          - ipv6
           - single_node
-          - calico
-          - cilium
-          - kube-vip
     fail-fast: false
     env:
       PYTHON_VERSION: "3.11"
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
+        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
         with:
           ref: ${{ github.event.pull_request.head.sha }}
-      # these steps are necessary if not using ephemeral nodes
-      - name: Delete old Vagrant box versions
-        if: always() # do this even if a step before has failed
-        run: vagrant box prune --force
-      - name: Remove all local Vagrant boxes
-        if: always() # do this even if a step before has failed
-        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
-      - name: Remove all Virtualbox VMs
-        if: always() # do this even if a step before has failed
-        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
-      - name: Remove all Virtualbox HDs
-        if: always() # do this even if a step before has failed
-        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
-      - name: Remove all Virtualbox Networks
-        if: always() # do this even if a step before has failed
-        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
-      - name: Remove Virtualbox network config
-        if: always() # do this even if a step before has failed
-        run: sudo rm /etc/vbox/networks.conf || true
       - name: Configure VirtualBox
         run: |-
           sudo mkdir -p /etc/vbox
@@ -58,19 +30,35 @@ jobs:
           * fdad:bad:ba55::/64
           EOF
+      - name: Cache pip
+        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+      - name: Cache Vagrant boxes
+        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
+        with:
+          path: |
+            ~/.vagrant.d/boxes
+          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
+          restore-keys: |
+            vagrant-boxes
+      - name: Download Vagrant boxes for all scenarios
+        # To save some cache space, all scenarios share the same cache key.
+        # On the other hand, this means that the cache contents should be
+        # the same across all scenarios. This step ensures that.
+        run: ./.github/download-boxes.sh
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0
+        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
-      - name: Restore vagrant Boxes cache
-        uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # 4.1.2
-        with:
-          path: ~/.vagrant.d/boxes
-          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
-          fail-on-cache-miss: true
       - name: Install dependencies
         run: |
           echo "::group::Upgrade pip"
@@ -87,40 +75,18 @@ jobs:
         env:
           ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
           ANSIBLE_SSH_RETRIES: 4
-          ANSIBLE_TIMEOUT: 120
+          ANSIBLE_TIMEOUT: 60
           PY_COLORS: 1
           ANSIBLE_FORCE_COLOR: 1
-      # these steps are necessary if not using ephemeral nodes
-      - name: Delete old Vagrant box versions
-        if: always() # do this even if a step before has failed
-        run: vagrant box prune --force
-      - name: Remove all local Vagrant boxes
-        if: always() # do this even if a step before has failed
-        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
-      - name: Remove all Virtualbox VMs
-        if: always() # do this even if a step before has failed
-        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
-      - name: Remove all Virtualbox HDs
-        if: always() # do this even if a step before has failed
-        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
-      - name: Remove all Virtualbox Networks
-        if: always() # do this even if a step before has failed
-        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
-      - name: Remove Virtualbox network config
-        if: always() # do this even if a step before has failed
-        run: sudo rm /etc/vbox/networks.conf || true
       - name: Upload log files
         if: always() # do this even if a step before has failed
-        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # 4.4.3
+        uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
         with:
           name: logs
           path: |
             ${{ runner.temp }}/logs
-          overwrite: true
+      - name: Delete old box versions
+        if: always() # do this even if a step before has failed
+        run: vagrant box prune --force

.gitignore
@@ -1,4 +1,3 @@
 .env/
 *.log
 ansible.cfg
-kubeconfig

@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
     hooks:
       - id: requirements-txt-fixer
       - id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
       - id: trailing-whitespace
         args: [--markdown-linebreak-ext=md]
   - repo: https://github.com/adrienverge/yamllint.git
-    rev: v1.33.0
+    rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
     hooks:
       - id: yamllint
         args: [-c=.yamllint]
   - repo: https://github.com/ansible-community/ansible-lint.git
-    rev: v6.22.2
+    rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
     hooks:
       - id: ansible-lint
   - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: v0.9.0.6
+    rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
     hooks:
       - id: shellcheck
   - repo: https://github.com/Lucas-C/pre-commit-hooks
-    rev: v1.5.4
+    rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
     hooks:
       - id: remove-crlf
       - id: remove-tabs
   - repo: https://github.com/sirosen/texthooks
-    rev: 0.6.4
+    rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
     hooks:
       - id: fix-smartquotes

@@ -2,19 +2,8 @@
 extends: default
 rules:
-  comments:
-    min-spaces-from-content: 1
-  comments-indentation: false
-  braces:
-    max-spaces-inside: 1
-  octal-values:
-    forbid-implicit-octal: true
-    forbid-explicit-octal: true
   line-length:
     max: 120
     level: warning
   truthy:
-    allowed-values: ["true", "false"]
+    allowed-values: ['true', 'false', 'yes', 'no']
-ignore:
-  - galaxy.yml

README.md
@@ -96,102 +96,16 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
 To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:
 ```bash
-scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
+scp debian@master_ip:~/.kube/config ~/.kube/config
 ```
-If you get file Permission denied, go into the node and temporarily run:
-```bash
-sudo chmod 777 /etc/rancher/k3s/k3s.yaml
-```
-Then copy with the scp command and reset the permissions back to:
-```bash
-sudo chmod 600 /etc/rancher/k3s/k3s.yaml
-```
-You'll then want to modify the config to point to master IP by running:
-```bash
-sudo nano ~/.kube/config
-```
-Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
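The removed instructions above boil down to this sequence (a sketch only; the `debian` user, the `192.168.1.222` master address, and GNU `sed` syntax are assumptions, not values fixed by the playbook):

```bash
# Copy the kubeconfig off a master node (fix its permissions first if scp is denied)
scp debian@192.168.1.222:/etc/rancher/k3s/k3s.yaml ~/.kube/config
# Point the config at the master instead of the node-local loopback address
sed -i 's|https://127.0.0.1:6443|https://192.168.1.222:6443|' ~/.kube/config
```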
 ### 🔨 Testing your cluster
 See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
-### Variables
-| Role(s) | Variable | Type | Default | Required | Description |
-|---|---|---|---|---|---|
-| `download` | `k3s_version` | string | ❌ | Required | K3s binaries version |
-| `k3s_agent`, `k3s_server`, `k3s_server_post` | `apiserver_endpoint` | string | ❌ | Required | Virtual ip-address configured on each master |
-| `k3s_agent` | `extra_agent_args` | string | `null` | Not required | Extra arguments for agent nodes |
-| `k3s_agent`, `k3s_server` | `group_name_master` | string | `null` | Not required | Name of the master group |
-| `k3s_agent` | `k3s_token` | string | `null` | Not required | Token used to communicate between masters |
-| `k3s_agent`, `k3s_server` | `proxy_env` | dict | `null` | Not required | Internet proxy configurations |
-| `k3s_agent`, `k3s_server` | `proxy_env.HTTP_PROXY` | string | ❌ | Required | HTTP internet proxy |
-| `k3s_agent`, `k3s_server` | `proxy_env.HTTPS_PROXY` | string | ❌ | Required | HTTPS internet proxy |
-| `k3s_agent`, `k3s_server` | `proxy_env.NO_PROXY` | string | ❌ | Required | Addresses that will not use the proxies |
-| `k3s_agent`, `k3s_server`, `reset` | `systemd_dir` | string | `/etc/systemd/system` | Not required | Path to systemd services |
-| `k3s_custom_registries` | `custom_registries_yaml` | string | ❌ | Required | YAML block defining custom registries. The following is an example that pulls all images used in this playbook through your private registries. It also allows you to pull your own images from your private registry, without having to use imagePullSecrets in your deployments. If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images, you can just remove those from the mirrors: section. |
-| `k3s_server`, `k3s_server_post` | `cilium_bgp` | bool | `~` | Not required | Enable cilium BGP control plane for LB services and pod cidrs. Disables the use of MetalLB. |
-| `k3s_server`, `k3s_server_post` | `cilium_iface` | string | ❌ | Not required | The network interface used for when Cilium is enabled |
-| `k3s_server` | `extra_server_args` | string | `""` | Not required | Extra arguments for server nodes |
-| `k3s_server` | `k3s_create_kubectl_symlink` | bool | `false` | Not required | Create the kubectl -> k3s symlink |
-| `k3s_server` | `k3s_create_crictl_symlink` | bool | `true` | Not required | Create the crictl -> k3s symlink |
-| `k3s_server` | `kube_vip_arp` | bool | `true` | Not required | Enables kube-vip ARP broadcasts |
-| `k3s_server` | `kube_vip_bgp` | bool | `false` | Not required | Enables kube-vip BGP peering |
-| `k3s_server` | `kube_vip_bgp_routerid` | string | `"127.0.0.1"` | Not required | Defines the router ID for the kube-vip BGP server |
-| `k3s_server` | `kube_vip_bgp_as` | string | `"64513"` | Not required | Defines the AS for the kube-vip BGP server |
-| `k3s_server` | `kube_vip_bgp_peeraddress` | string | `"192.168.30.1"` | Not required | Defines the address for the kube-vip BGP peer |
-| `k3s_server` | `kube_vip_bgp_peeras` | string | `"64512"` | Not required | Defines the AS for the kube-vip BGP peer |
-| `k3s_server` | `kube_vip_bgp_peers` | list | `[]` | Not required | List of BGP peer ASN & address pairs |
-| `k3s_server` | `kube_vip_bgp_peers_groups` | list | `['k3s_master']` | Not required | Inventory group in which to search for additional `kube_vip_bgp_peers` parameters to merge. |
-| `k3s_server` | `kube_vip_iface` | string | `~` | Not required | Explicitly define an interface that ALL control nodes should use to propagate the VIP, define it here. Otherwise, kube-vip will determine the right interface automatically at runtime. |
-| `k3s_server` | `kube_vip_tag_version` | string | `v0.7.2` | Not required | Image tag for kube-vip |
-| `k3s_server` | `kube_vip_cloud_provider_tag_version` | string | `main` | Not required | Tag for kube-vip-cloud-provider manifest when enabled |
-| `k3s_server`, `k3s_server_post` | `kube_vip_lb_ip_range` | string | `~` | Not required | IP range for kube-vip load balancer |
-| `k3s_server`, `k3s_server_post` | `metal_lb_controller_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB |
-| `k3s_server` | `metal_lb_speaker_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB |
-| `k3s_server` | `metal_lb_type` | string | `native` | Not required | Use FRR mode or native. Valid values are `frr` and `native` |
-| `k3s_server` | `retry_count` | int | `20` | Not required | Amount of retries when verifying that nodes joined |
-| `k3s_server` | `server_init_args` | string | ❌ | Not required | Arguments for server nodes |
-| `k3s_server_post` | `bpf_lb_algorithm` | string | `maglev` | Not required | BPF lb algorithm |
-| `k3s_server_post` | `bpf_lb_mode` | string | `hybrid` | Not required | BPF lb mode |
-| `k3s_server_post` | `calico_blocksize` | int | `26` | Not required | IP pool block size |
-| `k3s_server_post` | `calico_ebpf` | bool | `false` | Not required | Use eBPF dataplane instead of iptables |
-| `k3s_server_post` | `calico_encapsulation` | string | `VXLANCrossSubnet` | Not required | IP pool encapsulation |
-| `k3s_server_post` | `calico_natOutgoing` | string | `Enabled` | Not required | IP pool NAT outgoing |
-| `k3s_server_post` | `calico_nodeSelector` | string | `all()` | Not required | IP pool node selector |
-| `k3s_server_post` | `calico_iface` | string | `~` | Not required | The network interface used for when Calico is enabled |
-| `k3s_server_post` | `calico_tag` | string | `v3.27.2` | Not required | Calico version tag |
-| `k3s_server_post` | `cilium_bgp_my_asn` | int | `64513` | Not required | Local ASN for BGP peer |
-| `k3s_server_post` | `cilium_bgp_peer_asn` | int | `64512` | Not required | BGP peer ASN |
-| `k3s_server_post` | `cilium_bgp_peer_address` | string | `~` | Not required | BGP peer address |
-| `k3s_server_post` | `cilium_bgp_neighbors` | list | `[]` | Not required | List of BGP peer ASN & address pairs |
-| `k3s_server_post` | `cilium_bgp_neighbors_groups` | list | `['k3s_all']` | Not required | Inventory group in which to search for additional `cilium_bgp_neighbors` parameters to merge. |
-| `k3s_server_post` | `cilium_bgp_lb_cidr` | string | `192.168.31.0/24` | Not required | BGP load balancer IP range |
-| `k3s_server_post` | `cilium_exportPodCIDR` | bool | `true` | Not required | Export pod CIDR |
-| `k3s_server_post` | `cilium_hubble` | bool | `true` | Not required | Enable Cilium Hubble |
-| `k3s_server_post` | `cilium_mode` | string | `native` | Not required | Inner-node communication mode (choices are `native` and `routed`) |
-| `k3s_server_post` | `cluster_cidr` | string | `10.52.0.0/16` | Not required | Inner-cluster IP range |
-| `k3s_server_post` | `enable_bpf_masquerade` | bool | `true` | Not required | Use IP masquerading |
-| `k3s_server_post` | `kube_proxy_replacement` | bool | `true` | Not required | Replace the native kube-proxy with Cilium |
-| `k3s_server_post` | `metal_lb_available_timeout` | string | `240s` | Not required | Wait for MetalLB resources |
-| `k3s_server_post` | `metal_lb_ip_range` | string | `192.168.30.80-192.168.30.90` | Not required | MetalLB ip range for load balancer |
-| `k3s_server_post` | `metal_lb_controller_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB |
-| `k3s_server_post` | `metal_lb_mode` | string | `layer2` | Not required | Metallb mode (choices are `bgp` and `layer2`) |
-| `k3s_server_post` | `metal_lb_bgp_my_asn` | string | `~` | Not required | BGP ASN configurations |
-| `k3s_server_post` | `metal_lb_bgp_peer_asn` | string | `~` | Not required | BGP peer ASN configurations |
-| `k3s_server_post` | `metal_lb_bgp_peer_address` | string | `~` | Not required | BGP peer address |
-| `lxc` | `custom_reboot_command` | string | `~` | Not required | Command to run on reboot |
-| `prereq` | `system_timezone` | string | `null` | Not required | Timezone to be set on all nodes |
-| `proxmox_lxc`, `reset_proxmox_lxc` | `proxmox_lxc_ct_ids` | list | ❌ | Required | Proxmox container ID list |
-| `raspberrypi` | `state` | string | `present` | Not required | Indicates whether the k3s prerequisites for Raspberry Pi should be set up (possible values are `present` and `absent`) |
 ### Troubleshooting
-Be sure to see [this post](https://github.com/timothystewart6/k3s-ansible/discussions/20) on how to troubleshoot common problems
+Be sure to see [this post](https://github.com/techno-tim/k3s-ansible/discussions/20) on how to troubleshoot common problems
 ### Testing the playbook using molecule
@@ -204,28 +118,6 @@ You can find more information about it [here](molecule/README.md).
 This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
-## 🌌 Ansible Galaxy
-This collection can now be used in larger ansible projects.
-Instructions:
-- create or modify a file `collections/requirements.yml` in your project
-```yml
-collections:
-  - name: ansible.utils
-  - name: community.general
-  - name: ansible.posix
-  - name: kubernetes.core
-  - name: https://github.com/timothystewart6/k3s-ansible.git
-    type: git
-    version: master
-```
-- install via `ansible-galaxy collection install -r ./collections/requirements.yml`
-- every role is now available via the prefix `techno_tim.k3s_ansible.` e.g. `techno_tim.k3s_ansible.lxc`
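For reference, consuming the removed collection looked roughly like this once installed (a sketch; the `proxmox` host group is an illustrative name, not one this repo defines):

```yml
- name: Configure LXC hosts with a role from the collection
  hosts: proxmox
  roles:
    - role: techno_tim.k3s_ansible.lxc
```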
 ## Thanks 🤝
 This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:

@@ -1,81 +0,0 @@
-### REQUIRED
-# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
-# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
-# underscores or numbers and cannot contain consecutive underscores
-namespace: techno_tim
-# The name of the collection. Has the same character restrictions as 'namespace'
-name: k3s_ansible
-# The version of the collection. Must be compatible with semantic versioning
-version: 1.0.0
-# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
-readme: README.md
-# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
-# @nicks:irc/im.site#channel'
-authors:
-  - your name <example@domain.com>
-### OPTIONAL but strongly recommended
-# A short summary description of the collection
-description: >
-  The easiest way to bootstrap a self-hosted High Availability Kubernetes
-  cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB,
-  and more.
-# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
-# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
-license:
-  - Apache-2.0
-# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
-# requirements as 'namespace' and 'name'
-tags:
-  - etcd
-  - high-availability
-  - k8s
-  - k3s
-  - k3s-cluster
-  - kube-vip
-  - kubernetes
-  - metallb
-  - rancher
-# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
-# collection label 'namespace.name'. The value is a version range
-# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
-# range specifiers can be set and are separated by ','
-dependencies:
-  ansible.utils: '*'
-  ansible.posix: '*'
-  community.general: '*'
-  kubernetes.core: '*'
-# The URL of the originating SCM repository
-repository: https://github.com/timothystewart6/k3s-ansible
-# The URL to any online docs
-documentation: https://github.com/timothystewart6/k3s-ansible
-# The URL to the homepage of the collection/project
-homepage: https://www.youtube.com/watch?v=CbkEWcUZ7zM
-# The URL to the collection issue tracker
-issues: https://github.com/timothystewart6/k3s-ansible/issues
-# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
-# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
-# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
-# and '.git' are always filtered. Mutually exclusive with 'manifest'
-build_ignore: []
-# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
-# list of MANIFEST.in style
-# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
-# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
-# with 'build_ignore'
-# manifest: null

@@ -1,103 +1,53 @@
 ---
-k3s_version: v1.30.2+k3s2
+k3s_version: v1.26.11+k3s2
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
 # Set your timezone
-system_timezone: Your/Timezone
+system_timezone: "Your/Timezone"
 # interface which will be used for flannel
-flannel_iface: eth0
+flannel_iface: "eth0"
-# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
-# calico_iface: "eth0"
-calico_ebpf: false # use eBPF dataplane instead of iptables
-calico_tag: v3.28.0 # calico version tag
-# uncomment cilium_iface to use cilium cni instead of flannel or calico
-# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
-# cilium_iface: "eth0"
-cilium_mode: native # native when nodes on same subnet or using bgp, else set routed
-cilium_tag: v1.16.0 # cilium version tag
-cilium_hubble: true # enable hubble observability relay and ui
-# if using calico or cilium, you may specify the cluster pod cidr pool
-cluster_cidr: 10.52.0.0/16
-# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
-cilium_bgp: false
-# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
-cilium_bgp_my_asn: "64513"
-cilium_bgp_peer_asn: "64512"
-cilium_bgp_peer_address: 192.168.30.1
-cilium_bgp_lb_cidr: 192.168.31.0/24 # cidr for cilium loadbalancer ipam
-# enable kube-vip ARP broadcasts
-kube_vip_arp: true
-# enable kube-vip BGP peering
-kube_vip_bgp: false
-# bgp parameters for kube-vip
-kube_vip_bgp_routerid: "127.0.0.1" # Defines the router ID for the BGP server
-kube_vip_bgp_as: "64513" # Defines the AS for the BGP server
-kube_vip_bgp_peeraddress: "192.168.30.1" # Defines the address for the BGP peer
-kube_vip_bgp_peeras: "64512" # Defines the AS for the BGP peer
 # apiserver_endpoint is virtual ip-address which will be configured on each master
-apiserver_endpoint: 192.168.30.222
+apiserver_endpoint: "192.168.30.222"
 # k3s_token is required masters can talk together securely
 # this token should be alpha numeric only
-k3s_token: some-SUPER-DEDEUPER-secret-password
+k3s_token: "some-SUPER-DEDEUPER-secret-password"
 # The IP on which the node is reachable in the cluster.
 # Here, a sensible default is provided, you can still override
 # it for each of your hosts, though.
-k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}"
+k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
 # Disable the taint manually by setting: k3s_master_taint = false
 k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
 # these arguments are recommended for servers as well as agents:
 extra_args: >-
-  {{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
+  --flannel-iface={{ flannel_iface }}
   --node-ip={{ k3s_node_ip }}
 # change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
-# the contents of the if block is also required if using calico or cilium
 extra_server_args: >-
   {{ extra_args }}
   {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
-  {% if calico_iface is defined or cilium_iface is defined %}
-  --flannel-backend=none
-  --disable-network-policy
-  --cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
-  {% endif %}
   --tls-san {{ apiserver_endpoint }}
   --disable servicelb
   --disable traefik
 extra_agent_args: >-
   {{ extra_args }}
 # image tag for kube-vip
-kube_vip_tag_version: v0.8.2
+kube_vip_tag_version: "v0.5.12"
-# tag for kube-vip-cloud-provider manifest
-# kube_vip_cloud_provider_tag_version: "main"
-# kube-vip ip range for load balancer
-# (uncomment to use kube-vip for services instead of MetalLB)
-# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
 # metallb type frr or native
-metal_lb_type: native
+metal_lb_type: "native"
 # metallb mode layer2 or bgp
-metal_lb_mode: layer2
+metal_lb_mode: "layer2"
 # bgp options
 # metal_lb_bgp_my_asn: "64513"
@@ -105,20 +55,20 @@ metal_lb_mode: layer2
 # metal_lb_bgp_peer_address: "192.168.30.1"
 # image tag for metal lb
-metal_lb_speaker_tag_version: v0.14.8
-metal_lb_controller_tag_version: v0.14.8
+metal_lb_speaker_tag_version: "v0.13.9"
+metal_lb_controller_tag_version: "v0.13.9"
 # metallb ip range for load balancer
-metal_lb_ip_range: 192.168.30.80-192.168.30.90
+metal_lb_ip_range: "192.168.30.80-192.168.30.90"
 # Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
 # in your hosts.ini file.
 # Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
 # Most notably, your containers must be privileged, and must not have nesting set to true.
 # Please note this script disables most of the security of lxc containers, with the trade off being that lxc
-# containers are significantly more resource efficient compared to full VMs.
+# containers are significantly more resource efficent compared to full VMs.
 # Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
-# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
+# I would only really recommend using this if you have partiularly low powered proxmox nodes where the overhead of
 # VMs would use a significant portion of your available resources.
 proxmox_lxc_configure: false
 # the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
@@ -172,10 +122,6 @@ custom_registries_yaml: |
     username: yourusername
     password: yourpassword
-# On some distros like Diet Pi, there is no dbus installed. dbus required by the default reboot command.
-# Uncomment if you need a custom reboot command
-# custom_reboot_command: /usr/sbin/shutdown -r now
 # Only enable and configure these if you access the internet through a proxy
 # proxy_env:
 #   HTTP_PROXY: "http://proxy.domain.local:3128"
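For orientation, with the branch-side defaults above (`flannel_iface: "eth0"` and at least one worker node, so the master taint is set), `extra_server_args` renders to roughly the following flag string; the `--node-ip` value is an assumed placeholder:

```
--flannel-iface=eth0 --node-ip=192.168.30.38 --node-taint node-role.kubernetes.io/master=true:NoSchedule --tls-san 192.168.30.222 --disable servicelb --disable traefik
```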

@@ -1,2 +1,2 @@
 ---
-ansible_user: "{{ proxmox_lxc_ssh_user }}"
+ansible_user: '{{ proxmox_lxc_ssh_user }}'

@@ -13,12 +13,6 @@ We have these scenarios:
   To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
 - **single_node**:
   Very similar to the default scenario, but uses only a single node for all cluster functionality.
-- **calico**:
-  The same as single node, but uses calico cni instead of flannel.
-- **cilium**:
-  The same as single node, but uses cilium cni instead of flannel.
-- **kube-vip**:
-  The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB.
 ## How to execute
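Either side's scenarios are driven with the molecule CLI; a minimal sketch of a single run (scenario names are the ones listed above):

```bash
molecule test --scenario-name single_node
```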

@@ -1,49 +0,0 @@
----
-dependency:
-  name: galaxy
-driver:
-  name: vagrant
-platforms:
-  - name: control1
-    box: generic/ubuntu2204
-    memory: 4096
-    cpus: 4
-    config_options:
-      # We currently can not use public-key based authentication on Ubuntu 22.04,
-      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
-    groups:
-      - k3s_cluster
-      - master
-    interfaces:
-      - network_name: private_network
-        ip: 192.168.30.62
-provisioner:
-  name: ansible
-  env:
-    ANSIBLE_VERBOSITY: 1
-  playbooks:
-    converge: ../resources/converge.yml
-    side_effect: ../resources/reset.yml
-    verify: ../resources/verify.yml
-  inventory:
-    links:
-      group_vars: ../../inventory/sample/group_vars
-scenario:
-  test_sequence:
-    - dependency
-    - cleanup
-    - destroy
-    - syntax
-    - create
-    - prepare
-    - converge
-    # idempotence is not possible with the playbook in its current form.
-    - verify
-    # We are repurposing side_effect here to test the reset playbook.
-    # This is why we do not run it before verify (which tests the cluster),
-    # but after the verify step.
-    - side_effect
-    - cleanup
-    - destroy

@@ -1,16 +0,0 @@
----
-- name: Apply overrides
-  hosts: all
-  tasks:
-    - name: Override host variables
-      ansible.builtin.set_fact:
-        # See:
-        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
-        calico_iface: eth1
-        # The test VMs might be a bit slow, so we give them more time to join the cluster:
-        retry_count: 45
-        # Make sure that our IP ranges do not collide with those of the other scenarios
-        apiserver_endpoint: 192.168.30.224
-        metal_lb_ip_range: 192.168.30.100-192.168.30.109

@@ -1,49 +0,0 @@
----
-dependency:
-  name: galaxy
-driver:
-  name: vagrant
-platforms:
-  - name: control1
-    box: generic/ubuntu2204
-    memory: 4096
-    cpus: 4
-    config_options:
-      # We currently can not use public-key based authentication on Ubuntu 22.04,
-      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
-    groups:
-      - k3s_cluster
-      - master
-    interfaces:
-      - network_name: private_network
-        ip: 192.168.30.63
-provisioner:
-  name: ansible
-  env:
-    ANSIBLE_VERBOSITY: 1
-  playbooks:
-    converge: ../resources/converge.yml
-    side_effect: ../resources/reset.yml
-    verify: ../resources/verify.yml
-  inventory:
-    links:
-      group_vars: ../../inventory/sample/group_vars
-scenario:
-  test_sequence:
-    - dependency
-    - cleanup
-    - destroy
-    - syntax
-    - create
-    - prepare
-    - converge
-    # idempotence is not possible with the playbook in its current form.
-    - verify
-    # We are repurposing side_effect here to test the reset playbook.
-    # This is why we do not run it before verify (which tests the cluster),
-    # but after the verify step.
-    - side_effect
-    - cleanup
-    - destroy

@@ -1,16 +0,0 @@
----
-- name: Apply overrides
-  hosts: all
-  tasks:
-    - name: Override host variables
-      ansible.builtin.set_fact:
-        # See:
-        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
-        cilium_iface: eth1
-        # The test VMs might be a bit slow, so we give them more time to join the cluster:
-        retry_count: 45
-        # Make sure that our IP ranges do not collide with those of the other scenarios
-        apiserver_endpoint: 192.168.30.225
-        metal_lb_ip_range: 192.168.30.110-192.168.30.119

@@ -4,9 +4,10 @@ dependency:
 driver:
   name: vagrant
 platforms:
   - name: control1
     box: generic/ubuntu2204
-    memory: 1024
+    memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
@@ -17,12 +18,12 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
   - name: control2
-    box: generic/debian12
-    memory: 1024
+    box: generic/debian11
+    memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
@@ -33,7 +34,7 @@ platforms:
   - name: control3
     box: generic/rocky9
-    memory: 1024
+    memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
@@ -44,7 +45,7 @@ platforms:
   - name: node1
     box: generic/ubuntu2204
-    memory: 1024
+    memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
@@ -55,12 +56,12 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
   - name: node2
     box: generic/rocky9
-    memory: 1024
+    memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
@@ -71,8 +72,6 @@ platforms:
 provisioner:
   name: ansible
-  env:
-    ANSIBLE_VERBOSITY: 1
   playbooks:
     converge: ../resources/converge.yml
     side_effect: ../resources/reset.yml
@@ -83,6 +82,7 @@ provisioner:
 scenario:
   test_sequence:
     - dependency
+    - lint
     - cleanup
     - destroy
     - syntax

@@ -17,6 +17,6 @@
   # and security needs.
   ansible.builtin.systemd:
     name: firewalld
-    enabled: false
+    enabled: no
     state: stopped
   become: true

@@ -6,7 +6,7 @@ driver:
 platforms:
   - name: control1
     box: generic/ubuntu2204
-    memory: 1024
+    memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
@@ -17,12 +17,12 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
   - name: control2
     box: generic/ubuntu2204
-    memory: 1024
+    memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
@@ -33,12 +33,12 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
   - name: node1
     box: generic/ubuntu2204
-    memory: 1024
+    memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
@@ -49,12 +49,10 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
 provisioner:
   name: ansible
-  env:
-    ANSIBLE_VERBOSITY: 1
   playbooks:
     converge: ../resources/converge.yml
     side_effect: ../resources/reset.yml
@@ -65,6 +63,7 @@ provisioner:
 scenario:
   test_sequence:
     - dependency
+    - lint
     - cleanup
     - destroy
     - syntax

@@ -38,7 +38,7 @@
     dest: /etc/netplan/55-flannel-ipv4.yaml
     owner: root
     group: root
-    mode: "0644"
+    mode: 0644
   register: netplan_template
 - name: Apply netplan configuration
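A side note on this change: under YAML 1.1, an unquoted `0644` is an octal integer literal, which is why the master side quotes the mode (and why master's `.yamllint` earlier in this diff adds an `octal-values` rule). A minimal sketch:

```yml
quoted_mode: "0644"   # a string; the module converts it to a file mode safely
unquoted_mode: 0644   # YAML 1.1 parses this as octal, i.e. the integer 420
```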

@@ -1,49 +0,0 @@
----
-dependency:
-  name: galaxy
-driver:
-  name: vagrant
-platforms:
-  - name: control1
-    box: generic/ubuntu2204
-    memory: 4096
-    cpus: 4
-    config_options:
-      # We currently can not use public-key based authentication on Ubuntu 22.04,
-      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
-    groups:
-      - k3s_cluster
-      - master
-    interfaces:
-      - network_name: private_network
-        ip: 192.168.30.62
-provisioner:
-  name: ansible
-  env:
-    ANSIBLE_VERBOSITY: 1
-  playbooks:
-    converge: ../resources/converge.yml
-    side_effect: ../resources/reset.yml
-    verify: ../resources/verify.yml
-  inventory:
-    links:
-      group_vars: ../../inventory/sample/group_vars
-scenario:
-  test_sequence:
-    - dependency
-    - cleanup
-    - destroy
-    - syntax
-    - create
-    - prepare
-    - converge
-    # idempotence is not possible with the playbook in its current form.
-    - verify
-    # We are repurposing side_effect here to test the reset playbook.
-    # This is why we do not run it before verify (which tests the cluster),
-    # but after the verify step.
-    - side_effect
-    - cleanup
-    - destroy

@@ -1,17 +0,0 @@
----
-- name: Apply overrides
-  hosts: all
-  tasks:
-    - name: Override host variables
-      ansible.builtin.set_fact:
-        # See:
-        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
-        flannel_iface: eth1
-        # The test VMs might be a bit slow, so we give them more time to join the cluster:
-        retry_count: 45
-        # Make sure that our IP ranges do not collide with those of the other scenarios
-        apiserver_endpoint: 192.168.30.225
-        # Use kube-vip instead of MetalLB
-        kube_vip_lb_ip_range: 192.168.30.110-192.168.30.119

@@ -27,7 +27,7 @@
     name: nginx
     namespace: "{{ testing_namespace }}"
     kubeconfig: "{{ kubecfg_path }}"
-  vars:
+  vars: &load_balancer_metadata
     metallb_ip: status.loadBalancer.ingress[0].ip
     metallb_port: spec.ports[0].port
   register: nginx_services
@@ -35,7 +35,7 @@
 - name: Assert that the nginx welcome page is available
   ansible.builtin.uri:
     url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
-    return_content: true
+    return_content: yes
   register: result
   failed_when: "'Welcome to nginx!' not in result.content"
   vars:
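The `&load_balancer_metadata` added on the branch side is a plain YAML anchor: `&name` labels a node so a later `*name` alias can reuse it verbatim. A sketch with illustrative names (`first`, `second`, and `lb_meta` are not from this playbook):

```yml
first:
  vars: &lb_meta
    metallb_ip: status.loadBalancer.ingress[0].ip
    metallb_port: spec.ports[0].port
second:
  vars: *lb_meta   # reuses the same two keys
```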

@@ -9,7 +9,7 @@
   ansible.builtin.assert:
     that: found_nodes == expected_nodes
     success_msg: "Found nodes as expected: {{ found_nodes }}"
-    fail_msg: Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}
+    fail_msg: "Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}"
   vars:
     found_nodes: >-
       {{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }}

@@ -11,8 +11,8 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: vagrant
-      ssh.password: vagrant
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
     groups:
       - k3s_cluster
       - master
@@ -21,8 +21,6 @@ platforms:
         ip: 192.168.30.50
 provisioner:
   name: ansible
-  env:
-    ANSIBLE_VERBOSITY: 1
   playbooks:
     converge: ../resources/converge.yml
     side_effect: ../resources/reset.yml
@@ -33,6 +31,7 @@ provisioner:
 scenario:
   test_sequence:
     - dependency
+    - lint
     - cleanup
     - destroy
     - syntax

@@ -12,5 +12,5 @@
   retry_count: 45
   # Make sure that our IP ranges do not collide with those of the default scenario
-  apiserver_endpoint: 192.168.30.223
-  metal_lb_ip_range: 192.168.30.91-192.168.30.99
+  apiserver_endpoint: "192.168.30.223"
+  metal_lb_ip_range: "192.168.30.91-192.168.30.99"

@@ -1,10 +1,9 @@
 ---
 - name: Reboot k3s_cluster
   hosts: k3s_cluster
-  gather_facts: true
+  gather_facts: yes
   tasks:
     - name: Reboot the nodes (and Wait upto 5 mins max)
       become: true
-      ansible.builtin.reboot:
-        reboot_command: "{{ custom_reboot_command | default(omit) }}"
+      reboot:
         reboot_timeout: 300
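The `default(omit)` on the master side is the standard Ansible idiom for optional module arguments: when `custom_reboot_command` is undefined, the `reboot_command` parameter is dropped entirely and `ansible.builtin.reboot` falls back to its built-in command. The pattern in isolation:

```yml
- name: Reboot, optionally with a custom command
  become: true
  ansible.builtin.reboot:
    reboot_command: "{{ custom_reboot_command | default(omit) }}"
    reboot_timeout: 300
```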

@@ -1,10 +1,10 @@
-ansible-core>=2.16.2
+ansible-core>=2.13.5
 jmespath>=1.0.1
-jsonpatch>=1.33
-kubernetes>=29.0.0
-molecule-plugins[vagrant]
-molecule>=6.0.3
-netaddr>=0.10.1
-pre-commit>=3.6.0
-pre-commit-hooks>=4.5.0
-pyyaml>=6.0.1
+jsonpatch>=1.32
+kubernetes>=25.3.0
+molecule-vagrant>=1.0.0
+molecule>=4.0.3
+netaddr>=0.8.0
+pre-commit>=2.20.0
+pre-commit-hooks>=1.3.1
+pyyaml>=6.0

@@ -4,165 +4,174 @@
 #
 # pip-compile requirements.in
 #
-ansible-compat==4.1.11
+ansible-compat==3.0.1
     # via molecule
-ansible-core==2.18.0
+ansible-core==2.15.4
     # via
     #   -r requirements.in
     #   ansible-compat
-    #   molecule
-attrs==23.2.0
-    # via
-    #   jsonschema
-    #   referencing
-bracex==2.4
-    # via wcmatch
-cachetools==5.3.2
+arrow==1.2.3
+    # via jinja2-time
+attrs==22.1.0
+    # via jsonschema
+binaryornot==0.4.4
+    # via cookiecutter
+cachetools==5.2.0
     # via google-auth
-certifi==2023.11.17
+certifi==2022.9.24
     # via
     #   kubernetes
     #   requests
-cffi==1.16.0
+cffi==1.15.1
     # via cryptography
-cfgv==3.4.0
+cfgv==3.3.1
     # via pre-commit
-charset-normalizer==3.3.2
+chardet==5.0.0
+    # via binaryornot
+charset-normalizer==2.1.1
     # via requests
-click==8.1.7
+click==8.1.3
     # via
     #   click-help-colors
+    #   cookiecutter
     #   molecule
-click-help-colors==0.9.4
+click-help-colors==0.9.1
     # via molecule
-cryptography==41.0.7
+commonmark==0.9.1
+    # via rich
+cookiecutter==2.1.1
+    # via molecule
+cryptography==38.0.3
     # via ansible-core
-distlib==0.3.8
+distlib==0.3.6
     # via virtualenv
+distro==1.8.0
+    # via selinux
 enrich==1.2.7
     # via molecule
-filelock==3.13.1
+filelock==3.8.0
     # via virtualenv
-google-auth==2.26.2
+google-auth==2.14.0
     # via kubernetes
-identify==2.5.33
+identify==2.5.8
     # via pre-commit
-idna==3.6
+idna==3.4
     # via requests
-jinja2==3.1.3
+jinja2==3.1.2
     # via
     #   ansible-core
+    #   cookiecutter
+    #   jinja2-time
     #   molecule
+    #   molecule-vagrant
+jinja2-time==0.2.0
+    # via cookiecutter
 jmespath==1.0.1
     # via -r requirements.in
 jsonpatch==1.33
     # via -r requirements.in
-jsonpointer==2.4
+jsonpointer==2.3
     # via jsonpatch
-jsonschema==4.21.1
+jsonschema==4.17.0
     # via
     #   ansible-compat
     #   molecule
-jsonschema-specifications==2023.12.1
-    # via jsonschema
-kubernetes==29.0.0
+kubernetes==25.3.0
     # via -r requirements.in
-markdown-it-py==3.0.0
-    # via rich
-markupsafe==2.1.4
+markupsafe==2.1.1
     # via jinja2
-mdurl==0.1.2
-    # via markdown-it-py
-molecule==6.0.3
+molecule==4.0.4
     # via
     #   -r requirements.in
-    #   molecule-plugins
-molecule-plugins[vagrant]==23.5.3
+    #   molecule-vagrant
+molecule-vagrant==1.0.0
     # via -r requirements.in
-netaddr==0.10.1
+netaddr==0.9.0
     # via -r requirements.in
-nodeenv==1.8.0
+nodeenv==1.7.0
     # via pre-commit
 oauthlib==3.2.2
-    # via
-    #   kubernetes
-    #   requests-oauthlib
-packaging==23.2
+    # via requests-oauthlib
+packaging==21.3
     # via
     #   ansible-compat
     #   ansible-core
     #   molecule
-platformdirs==4.1.0
+platformdirs==2.5.2
     # via virtualenv
-pluggy==1.3.0
+pluggy==1.0.0
     # via molecule
-pre-commit==3.8.0
+pre-commit==2.21.0
     # via -r requirements.in
-pre-commit-hooks==4.6.0
+pre-commit-hooks==4.5.0
     # via -r requirements.in
-pyasn1==0.5.1
+pyasn1==0.4.8
     # via
     #   pyasn1-modules
     #   rsa
-pyasn1-modules==0.3.0
+pyasn1-modules==0.2.8
     # via google-auth
 pycparser==2.21
     # via cffi
-pygments==2.17.2
+pygments==2.13.0
     # via rich
+pyparsing==3.0.9
+    # via packaging
+pyrsistent==0.19.2
+    # via jsonschema
 python-dateutil==2.8.2
-    # via kubernetes
+    # via
+    #   arrow
+    #   kubernetes
+python-slugify==6.1.2
+    # via cookiecutter
 python-vagrant==1.0.0
-    # via molecule-plugins
-pyyaml==6.0.2
+    # via molecule-vagrant
+pyyaml==6.0.1
     # via
     #   -r requirements.in
     #   ansible-compat
     #   ansible-core
+    #   cookiecutter
     #   kubernetes
     #   molecule
+    #   molecule-vagrant
     #   pre-commit
-referencing==0.32.1
-    # via
-    #   jsonschema
-    #   jsonschema-specifications
-requests==2.31.0
+requests==2.28.1
     # via
+    #   cookiecutter
     #   kubernetes
     #   requests-oauthlib
 requests-oauthlib==1.3.1
     # via kubernetes
-resolvelib==1.0.1
+resolvelib==0.8.1
     # via ansible-core
-rich==13.7.0
+rich==12.6.0
     # via
     #   enrich
     #   molecule
-rpds-py==0.17.1
-    # via
-    #   jsonschema
-    #   referencing
 rsa==4.9
     # via google-auth
-ruamel-yaml==0.18.5
+ruamel-yaml==0.17.21
     # via pre-commit-hooks
-ruamel-yaml-clib==0.2.8
-    # via ruamel-yaml
+selinux==0.2.1
+    # via molecule-vagrant
 six==1.16.0
     # via
+    #   google-auth
     #   kubernetes
     #   python-dateutil
 subprocess-tee==0.4.1
     # via ansible-compat
urllib3==2.1.0 text-unidecode==1.3
# via python-slugify
urllib3==1.26.12
# via # via
# kubernetes # kubernetes
# requests # requests
virtualenv==20.25.0 virtualenv==20.16.6
# via pre-commit # via pre-commit
wcmatch==8.5 websocket-client==1.4.2
# via molecule
websocket-client==1.7.0
# via kubernetes # via kubernetes
# The following packages are considered to be unsafe in a requirements file: # The following packages are considered to be unsafe in a requirements file:
View File
@@ -1,7 +1,7 @@
--- ---
- name: Reset k3s cluster - name: Reset k3s cluster
hosts: k3s_cluster hosts: k3s_cluster
gather_facts: true gather_facts: yes
roles: roles:
- role: reset - role: reset
become: true become: true
@@ -11,14 +11,13 @@
post_tasks: post_tasks:
- name: Reboot and wait for node to come back up - name: Reboot and wait for node to come back up
become: true become: true
ansible.builtin.reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 3600 reboot_timeout: 3600
- name: Revert changes to Proxmox cluster - name: Revert changes to Proxmox cluster
hosts: proxmox hosts: proxmox
gather_facts: true gather_facts: true
become: true become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}" remote_user: "{{ proxmox_lxc_ssh_user }}"
roles: roles:
- role: reset_proxmox_lxc - role: reset_proxmox_lxc
View File
@@ -1,8 +0,0 @@
---
argument_specs:
main:
short_description: Manage the downloading of K3S binaries
options:
k3s_version:
description: The desired version of K3S
required: true
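A matching inventory value is simply a k3s release tag; a minimal sketch (the tag shown is illustrative, not a pinned recommendation):

k3s_version: v1.26.0+k3s1  # hypothetical example; use any published k3s release tag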
View File
@@ -1,34 +1,36 @@
--- ---
- name: Download k3s binary x64 - name: Download k3s binary x64
ansible.builtin.get_url: get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
dest: /usr/local/bin/k3s dest: /usr/local/bin/k3s
owner: root owner: root
group: root group: root
mode: "0755" mode: 0755
when: ansible_facts.architecture == "x86_64" when: ansible_facts.architecture == "x86_64"
- name: Download k3s binary arm64 - name: Download k3s binary arm64
ansible.builtin.get_url: get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64 url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt
dest: /usr/local/bin/k3s dest: /usr/local/bin/k3s
owner: root owner: root
group: root group: root
mode: "0755" mode: 0755
when: when:
- ( ansible_facts.architecture is search("arm") and ansible_facts.userspace_bits == "64" ) - ( ansible_facts.architecture is search("arm") and
or ansible_facts.architecture is search("aarch64") ansible_facts.userspace_bits == "64" ) or
ansible_facts.architecture is search("aarch64")
- name: Download k3s binary armhf - name: Download k3s binary armhf
ansible.builtin.get_url: get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt
dest: /usr/local/bin/k3s dest: /usr/local/bin/k3s
owner: root owner: root
group: root group: root
mode: "0755" mode: 0755
when: when:
- ansible_facts.architecture is search("arm") - ansible_facts.architecture is search("arm")
- ansible_facts.userspace_bits == "32" - ansible_facts.userspace_bits == "32"
View File
@@ -1,4 +0,0 @@
---
extra_agent_args: ""
group_name_master: master
systemd_dir: /etc/systemd/system
View File
@@ -1,39 +0,0 @@
---
argument_specs:
main:
short_description: Setup k3s agents
options:
apiserver_endpoint:
description: Virtual IP address configured on each master
required: true
extra_agent_args:
description: Extra arguments for agent nodes
group_name_master:
description: Name of the master group
default: master
k3s_token:
description: Token used to communicate between masters
proxy_env:
type: dict
description:
- Internet proxy configurations.
- See https://docs.k3s.io/advanced#configuring-an-http-proxy for details
default: ~
options:
HTTP_PROXY:
description: HTTP internet proxy
required: true
HTTPS_PROXY:
description: HTTPS internet proxy
required: true
NO_PROXY:
description: Addresses that will not use the proxies
required: true
systemd_dir:
description: Path to systemd services
default: /etc/systemd/system
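A minimal sketch of an inventory value satisfying this proxy_env spec, with hypothetical proxy hosts (format per https://docs.k3s.io/advanced#configuring-an-http-proxy):

proxy_env:
  HTTP_PROXY: http://proxy.example.com:3128
  HTTPS_PROXY: http://proxy.example.com:3128
  NO_PROXY: localhost,127.0.0.1,192.168.30.0/24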
View File
@@ -1,18 +1,18 @@
--- ---
- name: Create k3s-node.service.d directory
ansible.builtin.file: - name: Create k3s.service.d directory
path: "{{ systemd_dir }}/k3s-node.service.d" file:
path: '{{ systemd_dir }}/k3s.service.d'
state: directory state: directory
owner: root owner: root
group: root group: root
mode: "0755" mode: '0755'
when: proxy_env is defined
- name: Copy K3s http_proxy conf file - name: Copy K3s http_proxy conf file
ansible.builtin.template: template:
src: http_proxy.conf.j2 src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf" dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root owner: root
group: root group: root
mode: "0755" mode: '0755'
when: proxy_env is defined
View File
@@ -1,36 +1,20 @@
--- ---
- name: Check for PXE-booted system
block:
- name: Check if system is PXE-booted
ansible.builtin.command:
cmd: cat /proc/cmdline
register: boot_cmdline
changed_when: false
check_mode: false
- name: Set fact for PXE-booted system
ansible.builtin.set_fact:
is_pxe_booted: "{{ 'root=/dev/nfs' in boot_cmdline.stdout }}"
when: boot_cmdline.stdout is defined
- name: Include http_proxy configuration tasks
ansible.builtin.include_tasks: http_proxy.yml
- name: Deploy K3s http_proxy conf - name: Deploy K3s http_proxy conf
ansible.builtin.include_tasks: http_proxy.yml include_tasks: http_proxy.yml
when: proxy_env is defined when: proxy_env is defined
- name: Configure the k3s service - name: Copy K3s service file
ansible.builtin.template: template:
src: k3s.service.j2 src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s-node.service" dest: "{{ systemd_dir }}/k3s-node.service"
owner: root owner: root
group: root group: root
mode: "0755" mode: 0755
- name: Manage k3s service - name: Enable and check K3s service
ansible.builtin.systemd: systemd:
name: k3s-node name: k3s-node
daemon_reload: true daemon_reload: yes
state: restarted state: restarted
enabled: true enabled: yes
View File
@@ -7,14 +7,11 @@ After=network-online.target
Type=notify Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay ExecStartPre=-/sbin/modprobe overlay
# Conditional snapshotter based on PXE boot status ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
ExecStart=/usr/local/bin/k3s agent \
--server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 \
{% if is_pxe_booted | default(false) %}--snapshotter native \
{% endif %}--token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} \
{{ extra_agent_args }}
KillMode=process KillMode=process
Delegate=yes Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576 LimitNOFILE=1048576
LimitNPROC=infinity LimitNPROC=infinity
LimitCORE=infinity LimitCORE=infinity
View File
@@ -0,0 +1,6 @@
---
# Indicates whether custom registries for k3s should be configured
# Possible values:
# - present
# - absent
state: present
View File
@@ -1,20 +0,0 @@
---
argument_specs:
main:
short_description: Configure the use of a custom container registry
options:
custom_registries_yaml:
description:
- YAML block defining custom registries.
- >
The following is an example that pulls all images used in
this playbook through your private registries.
- >
It also allows you to pull your own images from your private
registry, without having to use imagePullSecrets in your
deployments.
- >
If all you need is your own images and you don't care about
caching the docker/quay/ghcr.io images, you can just remove
those from the mirrors: section.
required: true
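As a sketch of what this option expects (registry host and credentials are hypothetical), in the registries.yaml format k3s consumes:

custom_registries_yaml: |
  mirrors:
    docker.io:
      endpoint:
        - https://registry.example.com:5000
  configs:
    "registry.example.com:5000":
      auth:
        username: registry-user  # hypothetical credentials
        password: registry-pass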
View File
@@ -1,16 +1,17 @@
--- ---
- name: Create directory /etc/rancher/k3s - name: Create directory /etc/rancher/k3s
ansible.builtin.file: file:
path: /etc/{{ item }} path: "/etc/{{ item }}"
state: directory state: directory
mode: "0755" mode: '0755'
loop: loop:
- rancher - rancher
- rancher/k3s - rancher/k3s
- name: Insert registries into /etc/rancher/k3s/registries.yaml - name: Insert registries into /etc/rancher/k3s/registries.yaml
ansible.builtin.blockinfile: blockinfile:
path: /etc/rancher/k3s/registries.yaml path: /etc/rancher/k3s/registries.yaml
block: "{{ custom_registries_yaml }}" block: "{{ custom_registries_yaml }}"
mode: "0600" mode: '0600'
create: true create: true
View File
@@ -1,30 +1,12 @@
--- ---
extra_server_args: "" # If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
k3s_kubectl_binary: k3s kubectl # will determine the right interface automatically at runtime.
kube_vip_iface: null
# Name of the master group
group_name_master: master group_name_master: master
kube_vip_arp: true
kube_vip_iface:
kube_vip_cloud_provider_tag_version: main
kube_vip_tag_version: v0.7.2
kube_vip_bgp: false
kube_vip_bgp_routerid: 127.0.0.1
kube_vip_bgp_as: "64513"
kube_vip_bgp_peeraddress: 192.168.30.1
kube_vip_bgp_peeras: "64512"
kube_vip_bgp_peers: []
kube_vip_bgp_peers_groups: ['k3s_master']
metal_lb_controller_tag_version: v0.14.3
metal_lb_speaker_tag_version: v0.14.3
metal_lb_type: native
retry_count: 20
# yamllint disable rule:line-length # yamllint disable rule:line-length
server_init_args: >- server_init_args: >-
{% if groups[group_name_master | default('master')] | length > 1 %} {% if groups[group_name_master | default('master')] | length > 1 %}
@@ -35,6 +17,4 @@ server_init_args: >-
{% endif %} {% endif %}
--token {{ k3s_token }} --token {{ k3s_token }}
{% endif %} {% endif %}
{{ extra_server_args }} {{ extra_server_args | default('') }}
systemd_dir: /etc/systemd/system
View File
@@ -1,135 +0,0 @@
---
argument_specs:
main:
short_description: Setup k3s servers
options:
apiserver_endpoint:
description: Virtual IP address configured on each master
required: true
cilium_bgp:
description:
- Enable cilium BGP control plane for LB services and pod cidrs.
- Disables the use of MetalLB.
type: bool
default: ~
cilium_iface:
description: The network interface used when Cilium is enabled
default: ~
extra_server_args:
description: Extra arguments for server nodes
default: ""
group_name_master:
description: Name of the master group
default: master
k3s_create_kubectl_symlink:
description: Create the kubectl -> k3s symlink
default: false
type: bool
k3s_create_crictl_symlink:
description: Create the crictl -> k3s symlink
default: false
type: bool
kube_vip_arp:
description: Enables kube-vip ARP broadcasts
default: true
type: bool
kube_vip_bgp:
description: Enables kube-vip BGP peering
default: false
type: bool
kube_vip_bgp_routerid:
description: Defines the router ID for the kube-vip BGP server
default: "127.0.0.1"
kube_vip_bgp_as:
description: Defines the AS for the kube-vip BGP server
default: "64513"
kube_vip_bgp_peeraddress:
description: Defines the address for the kube-vip BGP peer
default: "192.168.30.1"
kube_vip_bgp_peeras:
description: Defines the AS for the kube-vip BGP peer
default: "64512"
kube_vip_bgp_peers:
description: List of BGP peer ASN & address pairs
default: []
kube_vip_bgp_peers_groups:
description: Inventory group in which to search for additional kube_vip_bgp_peers parameters to merge.
default: ['k3s_master']
kube_vip_iface:
description:
- Explicitly define an interface that ALL control nodes
- should use to propagate the VIP.
- Otherwise, kube-vip will determine the right interface
- automatically at runtime.
default: ~
kube_vip_tag_version:
description: Image tag for kube-vip
default: v0.7.2
kube_vip_cloud_provider_tag_version:
description: Tag for kube-vip-cloud-provider manifest when enabled
default: main
kube_vip_lb_ip_range:
description: IP range for kube-vip load balancer
default: ~
metal_lb_controller_tag_version:
description: Image tag for the MetalLB controller
default: v0.14.3
metal_lb_speaker_tag_version:
description: Image tag for the MetalLB speaker
default: v0.14.3
metal_lb_type:
choices:
- frr
- native
default: native
description: Use FRR mode or native. Valid values are `frr` and `native`
proxy_env:
type: dict
description:
- Internet proxy configurations.
- See https://docs.k3s.io/advanced#configuring-an-http-proxy for details
default: ~
options:
HTTP_PROXY:
description: HTTP internet proxy
required: true
HTTPS_PROXY:
description: HTTPS internet proxy
required: true
NO_PROXY:
description: Addresses that will not use the proxies
required: true
retry_count:
description: Number of retries when verifying that nodes joined
type: int
default: 20
server_init_args:
description: Arguments for server nodes
systemd_dir:
description: Path to systemd services
default: /etc/systemd/system
View File
@@ -23,6 +23,6 @@
ansible.builtin.template: ansible.builtin.template:
src: content.j2 src: content.j2
dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log" dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
mode: "0644" mode: 0644
vars: vars:
content: "{{ k3s_init_log.stdout }}" content: "{{ k3s_init_log.stdout }}"
View File
@@ -1,16 +1,18 @@
--- ---
- name: Create k3s.service.d directory - name: Create k3s.service.d directory
ansible.builtin.file: file:
path: "{{ systemd_dir }}/k3s.service.d" path: '{{ systemd_dir }}/k3s.service.d'
state: directory state: directory
owner: root owner: root
group: root group: root
mode: "0755" mode: '0755'
- name: Copy K3s http_proxy conf file - name: Copy K3s http_proxy conf file
ansible.builtin.template: template:
src: http_proxy.conf.j2 src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf" dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root owner: root
group: root group: root
mode: "0755" mode: '0755'
View File
@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip cloud provider manifest to first master
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml # noqa yaml[line-length]
dest: /var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml
owner: root
group: root
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy kubevip configMap manifest to first master
ansible.builtin.template:
src: kubevip.yaml.j2
dest: /var/lib/rancher/k3s/server/manifests/kubevip.yaml
owner: root
group: root
mode: "0644"
View File

@@ -1,50 +1,42 @@
--- ---
- name: Stop k3s-init
ansible.builtin.systemd:
name: k3s-init
state: stopped
failed_when: false
# k3s-init won't work if the port is already in use - name: Stop k3s-init
- name: Stop k3s systemd:
ansible.builtin.systemd: name: k3s-init
name: k3s
state: stopped state: stopped
failed_when: false failed_when: false
- name: Clean previous runs of k3s-init # noqa command-instead-of-module - name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command. # The systemd module does not support "reset-failed", so we need to resort to command.
ansible.builtin.command: systemctl reset-failed k3s-init command: systemctl reset-failed k3s-init
failed_when: false failed_when: false
changed_when: false changed_when: false
- name: Deploy K3s http_proxy conf - name: Deploy K3s http_proxy conf
ansible.builtin.include_tasks: http_proxy.yml include_tasks: http_proxy.yml
when: proxy_env is defined when: proxy_env is defined
- name: Deploy vip manifest - name: Deploy vip manifest
ansible.builtin.include_tasks: vip.yml include_tasks: vip.yml
- name: Deploy metallb manifest
ansible.builtin.include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Deploy kube-vip manifest - name: Deploy metallb manifest
ansible.builtin.include_tasks: kube-vip.yml include_tasks: metallb.yml
tags: kubevip tags: metallb
when: kube_vip_lb_ip_range is defined
- name: Init cluster inside the transient k3s-init service - name: Init cluster inside the transient k3s-init service
ansible.builtin.command: command:
cmd: systemd-run -p RestartSec=2 -p Restart=on-failure --unit=k3s-init k3s server {{ server_init_args }} cmd: "systemd-run -p RestartSec=2 \
creates: "{{ systemd_dir }}/k3s-init.service" -p Restart=on-failure \
--unit=k3s-init \
k3s server {{ server_init_args }}"
creates: "{{ systemd_dir }}/k3s.service"
- name: Verification - name: Verification
when: not ansible_check_mode when: not ansible_check_mode
block: block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails) - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
ansible.builtin.command: command:
cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" # yamllint disable-line rule:line-length cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
register: nodes register: nodes
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
retries: "{{ retry_count | default(20) }}" retries: "{{ retry_count | default(20) }}"
@@ -52,79 +44,79 @@
changed_when: false changed_when: false
always: always:
- name: Save logs of k3s-init.service - name: Save logs of k3s-init.service
ansible.builtin.include_tasks: fetch_k3s_init_logs.yml include_tasks: fetch_k3s_init_logs.yml
when: log_destination when: log_destination
vars: vars:
log_destination: >- log_destination: >-
{{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }} {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
- name: Kill the temporary service used for initialization - name: Kill the temporary service used for initialization
ansible.builtin.systemd: systemd:
name: k3s-init name: k3s-init
state: stopped state: stopped
failed_when: false failed_when: false
- name: Copy K3s service file - name: Copy K3s service file
register: k3s_service register: k3s_service
ansible.builtin.template: template:
src: k3s.service.j2 src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s.service" dest: "{{ systemd_dir }}/k3s.service"
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
- name: Enable and check K3s service - name: Enable and check K3s service
ansible.builtin.systemd: systemd:
name: k3s name: k3s
daemon_reload: true daemon_reload: yes
state: restarted state: restarted
enabled: true enabled: yes
- name: Wait for node-token - name: Wait for node-token
ansible.builtin.wait_for: wait_for:
path: /var/lib/rancher/k3s/server/node-token path: /var/lib/rancher/k3s/server/node-token
- name: Register node-token file access mode - name: Register node-token file access mode
ansible.builtin.stat: stat:
path: /var/lib/rancher/k3s/server path: /var/lib/rancher/k3s/server
register: p register: p
- name: Change file access node-token - name: Change file access node-token
ansible.builtin.file: file:
path: /var/lib/rancher/k3s/server path: /var/lib/rancher/k3s/server
mode: g+rx,o+rx mode: "g+rx,o+rx"
- name: Read node-token from master - name: Read node-token from master
ansible.builtin.slurp: slurp:
src: /var/lib/rancher/k3s/server/node-token src: /var/lib/rancher/k3s/server/node-token
register: node_token register: node_token
- name: Store Master node-token - name: Store Master node-token
ansible.builtin.set_fact: set_fact:
token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}" token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
- name: Restore node-token file access - name: Restore node-token file access
ansible.builtin.file: file:
path: /var/lib/rancher/k3s/server path: /var/lib/rancher/k3s/server
mode: "{{ p.stat.mode }}" mode: "{{ p.stat.mode }}"
- name: Create directory .kube - name: Create directory .kube
ansible.builtin.file: file:
path: "{{ ansible_user_dir }}/.kube" path: "{{ ansible_user_dir }}/.kube"
state: directory state: directory
owner: "{{ ansible_user_id }}" owner: "{{ ansible_user_id }}"
mode: u=rwx,g=rx,o= mode: "u=rwx,g=rx,o="
- name: Copy config file to user home directory - name: Copy config file to user home directory
ansible.builtin.copy: copy:
src: /etc/rancher/k3s/k3s.yaml src: /etc/rancher/k3s/k3s.yaml
dest: "{{ ansible_user_dir }}/.kube/config" dest: "{{ ansible_user_dir }}/.kube/config"
remote_src: true remote_src: yes
owner: "{{ ansible_user_id }}" owner: "{{ ansible_user_id }}"
mode: u=rw,g=,o= mode: "u=rw,g=,o="
- name: Configure kubectl cluster to {{ endpoint_url }} - name: Configure kubectl cluster to {{ endpoint_url }}
ansible.builtin.command: >- command: >-
{{ k3s_kubectl_binary | default('k3s kubectl') }} config set-cluster default k3s kubectl config set-cluster default
--server={{ endpoint_url }} --server={{ endpoint_url }}
--kubeconfig {{ ansible_user_dir }}/.kube/config --kubeconfig {{ ansible_user_dir }}/.kube/config
changed_when: true changed_when: true
@@ -137,33 +129,31 @@
# noqa jinja[invalid] # noqa jinja[invalid]
- name: Create kubectl symlink - name: Create kubectl symlink
ansible.builtin.file: file:
src: /usr/local/bin/k3s src: /usr/local/bin/k3s
dest: /usr/local/bin/kubectl dest: /usr/local/bin/kubectl
state: link state: link
when: k3s_create_kubectl_symlink | default(true) | bool
- name: Create crictl symlink - name: Create crictl symlink
ansible.builtin.file: file:
src: /usr/local/bin/k3s src: /usr/local/bin/k3s
dest: /usr/local/bin/crictl dest: /usr/local/bin/crictl
state: link state: link
when: k3s_create_crictl_symlink | default(true) | bool
- name: Get contents of manifests folder - name: Get contents of manifests folder
ansible.builtin.find: find:
paths: /var/lib/rancher/k3s/server/manifests paths: /var/lib/rancher/k3s/server/manifests
file_type: file file_type: file
register: k3s_server_manifests register: k3s_server_manifests
- name: Get sub dirs of manifests folder - name: Get sub dirs of manifests folder
ansible.builtin.find: find:
paths: /var/lib/rancher/k3s/server/manifests paths: /var/lib/rancher/k3s/server/manifests
file_type: directory file_type: directory
register: k3s_server_manifests_directories register: k3s_server_manifests_directories
- name: Remove manifests and folders that are only needed for bootstrapping the cluster so k3s doesn't auto-apply them on start - name: Remove manifests and folders that are only needed for bootstrapping the cluster so k3s doesn't auto-apply them on start
ansible.builtin.file: file:
path: "{{ item.path }}" path: "{{ item.path }}"
state: absent state: absent
with_items: with_items:
View File
@@ -1,30 +1,30 @@
--- ---
- name: Create manifests directory on first master - name: Create manifests directory on first master
ansible.builtin.file: file:
path: /var/lib/rancher/k3s/server/manifests path: /var/lib/rancher/k3s/server/manifests
state: directory state: directory
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}" - name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
ansible.builtin.get_url: ansible.builtin.get_url:
url: https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml # noqa yaml[line-length] url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml" # noqa yaml[line-length]
dest: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Set image versions in manifest for metallb-{{ metal_lb_type }} - name: Set image versions in manifest for metallb-{{ metal_lb_type }}
ansible.builtin.replace: ansible.builtin.replace:
path: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
regexp: "{{ item.change | ansible.builtin.regex_escape }}" regexp: "{{ item.change | ansible.builtin.regex_escape }}"
replace: "{{ item.to }}" replace: "{{ item.to }}"
with_items: with_items:
- change: metallb/speaker:{{ metal_lb_controller_tag_version }} - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
to: metallb/speaker:{{ metal_lb_speaker_tag_version }} to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
loop_control: loop_control:
label: "{{ item.change }} => {{ item.to }}" label: "{{ item.change }} => {{ item.to }}"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
View File
@@ -1,31 +1,27 @@
--- ---
- name: Set _kube_vip_bgp_peers fact
ansible.builtin.set_fact:
_kube_vip_bgp_peers: "{{ lookup('community.general.merge_variables', '^kube_vip_bgp_peers__.+$', initial_value=kube_vip_bgp_peers, groups=kube_vip_bgp_peers_groups) }}" # yamllint disable-line rule:line-length
- name: Create manifests directory on first master - name: Create manifests directory on first master
ansible.builtin.file: file:
path: /var/lib/rancher/k3s/server/manifests path: /var/lib/rancher/k3s/server/manifests
state: directory state: directory
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip rbac manifest to first master - name: Download vip rbac manifest to first master
ansible.builtin.get_url: ansible.builtin.get_url:
url: https://kube-vip.io/manifests/rbac.yaml url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
dest: /var/lib/rancher/k3s/server/manifests/vip-rbac.yaml dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy vip manifest to first master - name: Copy vip manifest to first master
ansible.builtin.template: template:
src: vip.yaml.j2 src: "vip.yaml.j2"
dest: /var/lib/rancher/k3s/server/manifests/vip.yaml dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
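For reference, the merge_variables lookup at the top of this file gathers every inventory variable matching ^kube_vip_bgp_peers__.+$ from the kube_vip_bgp_peers_groups groups; a hypothetical group_vars fragment it would merge:

kube_vip_bgp_peers__tor1:  # suffix after '__' is arbitrary; values are illustrative
  - peer_address: 192.168.30.1
    peer_asn: 64512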
View File
@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubevip
namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
range-global: {{ kube_vip_lb_ip_range | join(',') }}
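Both value shapes handled by the Jinja block above are therefore valid in inventory (ranges are illustrative):

kube_vip_lb_ip_range: 192.168.30.80-192.168.30.90  # legacy single string
# or the list form, joined with ',' into range-global:
kube_vip_lb_ip_range:
  - 192.168.30.80-192.168.30.85
  - 192.168.30.86-192.168.30.90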
View File
@@ -27,9 +27,7 @@ spec:
- manager - manager
env: env:
- name: vip_arp - name: vip_arp
value: "{{ 'true' if kube_vip_arp | default(true) | bool else 'false' }}" value: "true"
- name: bgp_enable
value: "{{ 'true' if kube_vip_bgp | default(false) | bool else 'false' }}"
- name: port - name: port
value: "6443" value: "6443"
{% if kube_vip_iface %} {% if kube_vip_iface %}
@@ -45,7 +43,7 @@ spec:
- name: vip_ddns - name: vip_ddns
value: "false" value: "false"
- name: svc_enable - name: svc_enable
value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}" value: "false"
- name: vip_leaderelection - name: vip_leaderelection
value: "true" value: "true"
- name: vip_leaseduration - name: vip_leaseduration
@@ -56,29 +54,6 @@ spec:
value: "2" value: "2"
- name: address - name: address
value: {{ apiserver_endpoint }} value: {{ apiserver_endpoint }}
{% if kube_vip_bgp | default(false) | bool %}
{% if kube_vip_bgp_routerid is defined %}
- name: bgp_routerid
value: "{{ kube_vip_bgp_routerid }}"
{% endif %}
{% if _kube_vip_bgp_peers | length > 0 %}
- name: bgppeers
value: "{{ _kube_vip_bgp_peers | map(attribute='peer_address') | zip(_kube_vip_bgp_peers| map(attribute='peer_asn')) | map('join', ',') | join(':') }}" # yamllint disable-line rule:line-length
{% else %}
{% if kube_vip_bgp_as is defined %}
- name: bgp_as
value: "{{ kube_vip_bgp_as }}"
{% endif %}
{% if kube_vip_bgp_peeraddress is defined %}
- name: bgp_peeraddress
value: "{{ kube_vip_bgp_peeraddress }}"
{% endif %}
{% if kube_vip_bgp_peeras is defined %}
- name: bgp_peeras
value: "{{ kube_vip_bgp_peeras }}"
{% endif %}
{% endif %}
{% endif %}
image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }} image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }}
imagePullPolicy: Always imagePullPolicy: Always
name: kube-vip name: kube-vip
View File
@@ -1,32 +1,6 @@
--- ---
k3s_kubectl_binary: k3s kubectl # Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s
bpf_lb_algorithm: maglev # Name of the master group
bpf_lb_mode: hybrid
calico_blockSize: 26 # noqa var-naming
calico_ebpf: false
calico_encapsulation: VXLANCrossSubnet
calico_natOutgoing: Enabled # noqa var-naming
calico_nodeSelector: all() # noqa var-naming
calico_tag: v3.27.2
cilium_bgp: false
cilium_exportPodCIDR: true # noqa var-naming
cilium_bgp_my_asn: 64513
cilium_bgp_peer_asn: 64512
cilium_bgp_neighbors: []
cilium_bgp_neighbors_groups: ['k3s_all']
cilium_bgp_lb_cidr: 192.168.31.0/24
cilium_hubble: true
cilium_mode: native
cluster_cidr: 10.52.0.0/16
enable_bpf_masquerade: true
kube_proxy_replacement: true
group_name_master: master group_name_master: master
metal_lb_mode: layer2
metal_lb_available_timeout: 240s
metal_lb_controller_tag_version: v0.14.3
metal_lb_ip_range: 192.168.30.80-192.168.30.90
View File
@@ -1,153 +0,0 @@
---
argument_specs:
main:
short_description: Configure k3s cluster
options:
apiserver_endpoint:
description: Virtual IP address configured on each master
required: true
bpf_lb_algorithm:
description: BPF lb algorithm
default: maglev
bpf_lb_mode:
description: BPF lb mode
default: hybrid
calico_blockSize:
description: IP pool block size
type: int
default: 26
calico_ebpf:
description: Use eBPF dataplane instead of iptables
type: bool
default: false
calico_encapsulation:
description: IP pool encapsulation
default: VXLANCrossSubnet
calico_natOutgoing:
description: IP pool NAT outgoing
default: Enabled
calico_nodeSelector:
description: IP pool node selector
default: all()
calico_iface:
description: The network interface used when Calico is enabled
default: ~
calico_tag:
description: Calico version tag
default: v3.27.2
cilium_bgp:
description:
- Enable cilium BGP control plane for LB services and pod cidrs.
- Disables the use of MetalLB.
type: bool
default: false
cilium_bgp_my_asn:
description: Local ASN for BGP peer
type: int
default: 64513
cilium_bgp_peer_asn:
description: BGP peer ASN
type: int
default: 64512
cilium_bgp_peer_address:
description: BGP peer address
default: ~
cilium_bgp_neighbors:
description: List of BGP peer ASN & address pairs
default: []
cilium_bgp_neighbors_groups:
description: Inventory group in which to search for additional cilium_bgp_neighbors parameters to merge.
default: ['k3s_all']
cilium_bgp_lb_cidr:
description: BGP load balancer IP range
default: 192.168.31.0/24
cilium_exportPodCIDR:
description: Export pod CIDR
type: bool
default: true
cilium_hubble:
description: Enable Cilium Hubble
type: bool
default: true
cilium_iface:
description: The network interface used when Cilium is enabled
default: ~
cilium_mode:
description: Inter-node communication mode
default: native
choices:
- native
- routed
cluster_cidr:
description: Internal cluster (pod) IP range
default: 10.52.0.0/16
enable_bpf_masquerade:
description: Use IP masquerading
type: bool
default: true
group_name_master:
description: Name of the master group
default: master
kube_proxy_replacement:
description: Replace the native kube-proxy with Cilium
type: bool
default: true
kube_vip_lb_ip_range:
description: IP range for kube-vip load balancer
default: ~
metal_lb_available_timeout:
description: Timeout when waiting for MetalLB resources
default: 240s
metal_lb_ip_range:
description: MetalLB ip range for load balancer
default: 192.168.30.80-192.168.30.90
metal_lb_controller_tag_version:
description: Image tag for the MetalLB controller
default: v0.14.3
metal_lb_mode:
description: MetalLB mode
default: layer2
choices:
- bgp
- layer2
metal_lb_bgp_my_asn:
description: Local BGP ASN
default: ~
metal_lb_bgp_peer_asn:
description: BGP peer ASN
default: ~
metal_lb_bgp_peer_address:
description: BGP peer address
default: ~
View File
@@ -1,120 +0,0 @@
---
- name: Deploy Calico to cluster
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create manifests directory on first master
ansible.builtin.file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: "0755"
- name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml
dest: /tmp/k3s/tigera-operator.yaml
owner: root
group: root
mode: "0755"
- name: Copy Calico custom resources manifest to first master
ansible.builtin.template:
src: calico.crs.j2
dest: /tmp/k3s/custom-resources.yaml
owner: root
group: root
mode: "0755"
- name: Deploy or replace Tigera Operator
block:
- name: Deploy Tigera Operator
ansible.builtin.command:
cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/tigera-operator.yaml"
register: create_operator
changed_when: "'created' in create_operator.stdout"
failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
rescue:
- name: Replace existing Tigera Operator
ansible.builtin.command:
cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} replace -f /tmp/k3s/tigera-operator.yaml"
register: replace_operator
changed_when: "'replaced' in replace_operator.stdout"
failed_when: "'Error' in replace_operator.stderr"
- name: Wait for Tigera Operator resources
ansible.builtin.command: >-
{{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator'
--for=condition=Available=True
--timeout=30s
register: tigera_result
changed_when: false
until: tigera_result is succeeded
retries: 7
delay: 7
with_items:
- { name: tigera-operator, type: deployment }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Deploy Calico custom resources
block:
- name: Deploy custom resources for Calico
ansible.builtin.command:
cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/custom-resources.yaml"
register: create_cr
changed_when: "'created' in create_cr.stdout"
failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
rescue:
- name: Apply new Calico custom resource manifest
ansible.builtin.command:
cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/custom-resources.yaml"
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'Error' in apply_cr.stderr"
- name: Wait for Calico system resources to be available
ansible.builtin.command: >-
{% if item.type == 'daemonset' %}
{{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods
--namespace='{{ item.namespace }}'
--selector={{ item.selector }}
--for=condition=Ready
{% else %}
{{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
--namespace='{{ item.namespace }}'
--for=condition=Available
{% endif %}
--timeout=30s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- { name: calico-typha, type: deployment, namespace: calico-system }
- { name: calico-kube-controllers, type: deployment, namespace: calico-system }
- name: csi-node-driver
type: daemonset
selector: k8s-app=csi-node-driver
namespace: calico-system
- name: calico-node
type: daemonset
selector: k8s-app=calico-node
namespace: calico-system
- { name: calico-apiserver, type: deployment, namespace: calico-apiserver }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Patch Felix configuration for eBPF mode
ansible.builtin.command:
cmd: >
{{ k3s_kubectl_binary | default('k3s kubectl') }} patch felixconfiguration default
--type='merge'
--patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
register: patch_result
changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
failed_when: "'Error' in patch_result.stderr"
when: calico_ebpf
View File
@@ -1,256 +0,0 @@
---
- name: Prepare Cilium CLI on first master and deploy CNI
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create tmp directory on first master
ansible.builtin.file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: "0755"
- name: Check if Cilium CLI is installed
ansible.builtin.command: cilium version
register: cilium_cli_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check for Cilium CLI version in command output
ansible.builtin.set_fact:
installed_cli_version: >-
{{
cilium_cli_installed.stdout_lines
| join(' ')
| regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
when: cilium_cli_installed.rc == 0
- name: Get latest stable Cilium CLI version file
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt
dest: /tmp/k3s/cilium-cli-stable.txt
owner: root
group: root
mode: "0755"
- name: Read Cilium CLI stable version from file
ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
register: cli_ver
changed_when: false
- name: Log installed Cilium CLI version
ansible.builtin.debug:
msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"
- name: Log latest stable Cilium CLI version
ansible.builtin.debug:
msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"
- name: Determine if Cilium CLI needs installation or update
ansible.builtin.set_fact:
cilium_cli_needs_update: >-
{{
cilium_cli_installed.rc != 0 or
(cilium_cli_installed.rc == 0 and
installed_cli_version != cli_ver.stdout)
}}
- name: Install or update Cilium CLI
when: cilium_cli_needs_update
block:
- name: Set architecture variable
ansible.builtin.set_fact:
cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
- name: Download Cilium CLI and checksum
ansible.builtin.get_url:
url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
dest: /tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}
owner: root
group: root
mode: "0755"
loop:
- .tar.gz
- .tar.gz.sha256sum
vars:
cilium_base_url: https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}
- name: Verify the downloaded tarball
ansible.builtin.shell: |
cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
args:
executable: /bin/bash
changed_when: false
- name: Extract Cilium CLI to /usr/local/bin
ansible.builtin.unarchive:
src: /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
dest: /usr/local/bin
remote_src: true
- name: Remove downloaded tarball and checksum file
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
- /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
- name: Wait for connectivity to kube VIP
ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
register: ping_result
until: ping_result.rc == 0
retries: 21
delay: 1
ignore_errors: true
changed_when: false
- name: Fail if kube VIP not reachable
ansible.builtin.fail:
msg: API endpoint {{ apiserver_endpoint }} is not reachable
when: ping_result.rc != 0
- name: Test for existing Cilium install
ansible.builtin.command: |
{{ k3s_kubectl_binary | default('k3s kubectl') }} -n kube-system get daemonsets cilium
register: cilium_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check existing Cilium install
when: cilium_installed.rc == 0
block:
- name: Check Cilium version
ansible.builtin.command: cilium version
register: cilium_version
failed_when: false
changed_when: false
ignore_errors: true
- name: Parse installed Cilium version
ansible.builtin.set_fact:
installed_cilium_version: >-
{{
cilium_version.stdout_lines
| join(' ')
| regex_findall('cilium image.+(\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
- name: Determine if Cilium needs update
ansible.builtin.set_fact:
cilium_needs_update: >-
{{ 'v' + installed_cilium_version != cilium_tag }}
- name: Log result
ansible.builtin.debug:
msg: >
Installed Cilium version: {{ installed_cilium_version }},
Target Cilium version: {{ cilium_tag }},
Update needed: {{ cilium_needs_update }}
- name: Install Cilium
ansible.builtin.command: >-
{% if cilium_installed.rc != 0 %}
cilium install
{% else %}
cilium upgrade
{% endif %}
--version "{{ cilium_tag }}"
--helm-set operator.replicas="1"
{{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
--helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
{% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
--helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
{% endif %}
--helm-set k8sServiceHost="127.0.0.1"
--helm-set k8sServicePort="6444"
--helm-set routingMode={{ cilium_mode }}
--helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
--helm-set kubeProxyReplacement={{ kube_proxy_replacement }}
--helm-set bpf.masquerade={{ enable_bpf_masquerade }}
--helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
--helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
{% if kube_proxy_replacement is not false %}
--helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm }}
--helm-set bpf.loadBalancer.mode={{ bpf_lb_mode }}
{% endif %}
environment:
KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
register: cilium_install_result
changed_when: cilium_install_result.rc == 0
when: cilium_installed.rc != 0 or cilium_needs_update
- name: Wait for Cilium resources
ansible.builtin.command: >-
{% if item.type == 'daemonset' %}
{{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods
--namespace=kube-system
--selector='k8s-app=cilium'
--for=condition=Ready
{% else %}
{{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
--namespace=kube-system
--for=condition=Available
{% endif %}
--timeout=30s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- { name: cilium-operator, type: deployment }
- { name: cilium, type: daemonset, selector: k8s-app=cilium }
- { name: hubble-relay, type: deployment, check_hubble: true }
- { name: hubble-ui, type: deployment, check_hubble: true }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
when: >-
not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)
- name: Configure Cilium BGP
when: cilium_bgp
block:
- name: Set _cilium_bgp_neighbors fact
ansible.builtin.set_fact:
_cilium_bgp_neighbors: "{{ lookup('community.general.merge_variables', '^cilium_bgp_neighbors__.+$', initial_value=cilium_bgp_neighbors, groups=cilium_bgp_neighbors_groups) }}" # yamllint disable-line rule:line-length
- name: Copy BGP manifests to first master
ansible.builtin.template:
src: cilium.crs.j2
dest: /tmp/k3s/cilium-bgp.yaml
owner: root
group: root
mode: "0755"
- name: Apply BGP manifests
ansible.builtin.command:
cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/cilium-bgp.yaml"
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'is invalid' in apply_cr.stderr"
ignore_errors: true
- name: Print error message if BGP manifests application fails
ansible.builtin.debug:
msg: "{{ apply_cr.stderr }}"
when: "'is invalid' in apply_cr.stderr"
- name: Test for BGP config resources
ansible.builtin.command: "{{ item }}"
loop:
- "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumBGPPeeringPolicy.cilium.io"
- "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumLoadBalancerIPPool.cilium.io"
changed_when: false
loop_control:
label: "{{ item }}"
View File
@@ -1,20 +1,9 @@
--- ---
- name: Deploy calico
ansible.builtin.include_tasks: calico.yml
tags: calico
when: calico_iface is defined and cilium_iface is not defined
- name: Deploy cilium
ansible.builtin.include_tasks: cilium.yml
tags: cilium
when: cilium_iface is defined
- name: Deploy metallb pool - name: Deploy metallb pool
ansible.builtin.include_tasks: metallb.yml include_tasks: metallb.yml
tags: metallb tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Remove tmp directory used for manifests - name: Remove tmp directory used for manifests
ansible.builtin.file: file:
path: /tmp/k3s path: /tmp/k3s
state: absent state: absent
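Given the when-conditions above, a hypothetical group_vars/all.yml fragment selecting exactly one CNI (interface name illustrative):

calico_iface: eth0    # deploys Calico; leave cilium_iface undefined
# cilium_iface: eth0  # define this instead to deploy Cilium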
View File
@@ -1,53 +1,32 @@
--- ---
- name: Create manifests directory for temp configuration - name: Create manifests directory for temp configuration
ansible.builtin.file: file:
path: /tmp/k3s path: /tmp/k3s
state: directory state: directory
owner: "{{ ansible_user_id }}" owner: "{{ ansible_user_id }}"
mode: "0755" mode: 0755
with_items: "{{ groups[group_name_master | default('master')] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Delete outdated metallb replicas
ansible.builtin.shell: |-
set -o pipefail
REPLICAS=$({{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' get replicasets \
-l 'component=controller,app=metallb' \
-o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
if [ -n "${REPLICAS_SETS}" ] ; then
for REPLICAS in ${REPLICAS_SETS}
do
{{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' \
delete rs "${REPLICAS}"
done
fi
args:
executable: /bin/bash
changed_when: false
run_once: true
with_items: "{{ groups[group_name_master | default('master')] }}"
- name: Copy metallb CRs manifest to first master - name: Copy metallb CRs manifest to first master
ansible.builtin.template: template:
src: metallb.crs.j2 src: "metallb.crs.j2"
dest: /tmp/k3s/metallb-crs.yaml dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user_id }}" owner: "{{ ansible_user_id }}"
mode: "0755" mode: 0755
with_items: "{{ groups[group_name_master | default('master')] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Test metallb-system namespace - name: Test metallb-system namespace
ansible.builtin.command: >- command: >-
{{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system k3s kubectl -n metallb-system
changed_when: false changed_when: false
with_items: "{{ groups[group_name_master | default('master')] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Wait for MetalLB resources - name: Wait for MetalLB resources
ansible.builtin.command: >- command: >-
{{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.resource }} k3s kubectl wait {{ item.resource }}
--namespace='metallb-system' --namespace='metallb-system'
{% if item.name | default(False) -%}{{ item.name }}{%- endif %} {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
{% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %} {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
@@ -83,30 +62,16 @@
loop_control: loop_control:
label: "{{ item.description }}" label: "{{ item.description }}"
- name: Set metallb webhook service name
ansible.builtin.set_fact:
metallb_webhook_service_name: >-
{{
(
(metal_lb_controller_tag_version | regex_replace('^v', ''))
is
version('0.14.4', '<', version_type='semver')
) | ternary(
'webhook-service',
'metallb-webhook-service'
)
}}
- name: Test metallb-system webhook-service endpoint - name: Test metallb-system webhook-service endpoint
ansible.builtin.command: >- command: >-
{{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get endpoints {{ metallb_webhook_service_name }} k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false changed_when: false
with_items: "{{ groups[group_name_master | default('master')] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Apply metallb CRs - name: Apply metallb CRs
ansible.builtin.command: >- command: >-
{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/metallb-crs.yaml k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
--timeout='{{ metal_lb_available_timeout }}' --timeout='{{ metal_lb_available_timeout }}'
register: this register: this
changed_when: false changed_when: false
@@ -115,8 +80,8 @@
retries: 5 retries: 5
- name: Test metallb-system resources for Layer 2 configuration - name: Test metallb-system resources for Layer 2 configuration
ansible.builtin.command: >- command: >-
{{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }} k3s kubectl -n metallb-system get {{ item }}
changed_when: false changed_when: false
run_once: true run_once: true
when: metal_lb_mode == "layer2" when: metal_lb_mode == "layer2"
@@ -125,8 +90,8 @@
- L2Advertisement - L2Advertisement
- name: Test metallb-system resources for BGP configuration - name: Test metallb-system resources for BGP configuration
ansible.builtin.command: >- command: >-
{{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }} k3s kubectl -n metallb-system get {{ item }}
changed_when: false changed_when: false
run_once: true run_once: true
when: metal_lb_mode == "bgp" when: metal_lb_mode == "bgp"
View File
@@ -1,41 +0,0 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: {{ calico_blockSize }}
        cidr: {{ cluster_cidr }}
        encapsulation: {{ calico_encapsulation }}
        natOutgoing: {{ calico_natOutgoing }}
        nodeSelector: {{ calico_nodeSelector }}
    nodeAddressAutodetectionV4:
      interface: {{ calico_iface }}
    linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
  KUBERNETES_SERVICE_PORT: '6443'
{% endif %}
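The template above consumes a handful of variables. A minimal, illustrative set (the variable names come from the template itself; the values are examples, not project defaults):

calico_iface: eth0                     # node interface Calico should autodetect
calico_ebpf: false                     # true switches linuxDataplane to BPF
calico_blockSize: 26
calico_encapsulation: VXLANCrossSubnet
calico_natOutgoing: true
calico_nodeSelector: all()
cluster_cidr: 10.52.0.0/16             # must not overlap existing networks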
@@ -1,48 +0,0 @@
apiVersion: "cilium.io/v2alpha1"
kind: CiliumBGPPeeringPolicy
metadata:
name: 01-bgp-peering-policy
spec: # CiliumBGPPeeringPolicySpec
virtualRouters: # []CiliumBGPVirtualRouter
- localASN: {{ cilium_bgp_my_asn }}
exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
neighbors: # []CiliumBGPNeighbor
{% if _cilium_bgp_neighbors | length > 0 %}
{% for item in _cilium_bgp_neighbors %}
- peerAddress: '{{ item.peer_address + "/32"}}'
peerASN: {{ item.peer_asn }}
eBGPMultihopTTL: 10
connectRetryTimeSeconds: 120
holdTimeSeconds: 90
keepAliveTimeSeconds: 30
gracefulRestart:
enabled: true
restartTimeSeconds: 120
{% endfor %}
{% else %}
- peerAddress: '{{ cilium_bgp_peer_address + "/32"}}'
peerASN: {{ cilium_bgp_peer_asn }}
eBGPMultihopTTL: 10
connectRetryTimeSeconds: 120
holdTimeSeconds: 90
keepAliveTimeSeconds: 30
gracefulRestart:
enabled: true
restartTimeSeconds: 120
{% endif %}
serviceSelector:
matchExpressions:
- {key: somekey, operator: NotIn, values: ['never-used-value']}
---
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
name: "01-lb-pool"
spec:
blocks:
{% if "/" in cilium_bgp_lb_cidr %}
- cidr: {{ cilium_bgp_lb_cidr }}
{% else %}
- start: {{ cilium_bgp_lb_cidr.split('-')[0] }}
stop: {{ cilium_bgp_lb_cidr.split('-')[1] }}
{% endif %}
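The pool block at the end of this template accepts either notation for cilium_bgp_lb_cidr, which the `"/" in ...` branch switches on. Illustrative values (examples only, not project defaults):

cilium_bgp_my_asn: 64513
cilium_bgp_peer_asn: 64512
cilium_bgp_peer_address: 192.168.30.1
cilium_bgp_lb_cidr: 192.168.31.0/24   # contains "/" -> rendered as a cidr block
# cilium_bgp_lb_cidr: 192.168.31.10-192.168.31.50   # range form -> start/stop block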
@@ -1,6 +1,5 @@
 ---
 - name: Reboot server
   become: true
-  ansible.builtin.reboot:
-    reboot_command: "{{ custom_reboot_command | default(omit) }}"
+  reboot:
   listen: reboot server
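The reboot_command parameter shown above matters mainly for LXC guests, where the reboot module's default command path can fail. A sketch of how a caller might use the same pattern (the override value is an example, not from the repo):

- hosts: k3s_cluster
  become: true
  vars:
    custom_reboot_command: /usr/sbin/shutdown -r now   # example override
  tasks:
    - name: Reboot using the same pattern as the handler
      ansible.builtin.reboot:
        reboot_command: "{{ custom_reboot_command | default(omit) }}"

With default(omit), leaving the variable unset falls back to the module's built-in reboot command.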
@@ -1,8 +0,0 @@
---
argument_specs:
  main:
    short_description: Configure LXC
    options:
      custom_reboot_command:
        default: ~
        description: Command to run on reboot
@@ -1,20 +1,20 @@
 ---
 - name: Check for rc.local file
-  ansible.builtin.stat:
+  stat:
     path: /etc/rc.local
   register: rcfile
 - name: Create rc.local if needed
-  ansible.builtin.lineinfile:
+  lineinfile:
     path: /etc/rc.local
     line: "#!/bin/sh -e"
     create: true
     insertbefore: BOF
-    mode: u=rwx,g=rx,o=rx
+    mode: "u=rwx,g=rx,o=rx"
   when: not rcfile.stat.exists
 - name: Write rc.local file
-  ansible.builtin.blockinfile:
+  blockinfile:
     path: /etc/rc.local
     content: "{{ lookup('template', 'templates/rc.local.j2') }}"
     state: present
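Worth noting for the reset logic later in this diff: blockinfile fences whatever it writes in marker comments, so a managed /etc/rc.local ends up shaped roughly like this (template body elided; markers are the module's defaults):

#!/bin/sh -e
# BEGIN ANSIBLE MANAGED BLOCK
# ... rendered templates/rc.local.j2 content ...
# END ANSIBLE MANAGED BLOCK

Removing the block with the same content later leaves only the shebang line, which is why the reset tasks can detect a "clean" file by counting lines.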
@@ -1,4 +1,4 @@
 ---
 secure_path:
-  RedHat: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
-  Suse: /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin
+  RedHat: '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
+  Suse: '/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin'
@@ -1,7 +0,0 @@
---
argument_specs:
  main:
    short_description: Prerequisites
    options:
      system_timezone:
        description: Timezone to be set on all nodes
@@ -14,7 +14,7 @@
     name: net.ipv4.ip_forward
     value: "1"
     state: present
-    reload: true
+    reload: yes
   tags: sysctl
 - name: Enable IPv6 forwarding
@@ -22,7 +22,7 @@
     name: net.ipv6.conf.all.forwarding
     value: "1"
     state: present
-    reload: true
+    reload: yes
   tags: sysctl
 - name: Enable IPv6 router advertisements
@@ -30,14 +30,14 @@
     name: net.ipv6.conf.all.accept_ra
     value: "2"
     state: present
-    reload: true
+    reload: yes
   tags: sysctl
 - name: Add br_netfilter to /etc/modules-load.d/
-  ansible.builtin.copy:
-    content: br_netfilter
+  copy:
+    content: "br_netfilter"
     dest: /etc/modules-load.d/br_netfilter.conf
-    mode: u=rw,g=,o=
+    mode: "u=rw,g=,o="
   when: ansible_os_family == "RedHat"
 - name: Load br_netfilter
@@ -51,7 +51,7 @@
     name: "{{ item }}"
     value: "1"
     state: present
-    reload: true
+    reload: yes
   when: ansible_os_family == "RedHat"
   loop:
     - net.bridge.bridge-nf-call-iptables
@@ -59,11 +59,11 @@
   tags: sysctl
 - name: Add /usr/local/bin to sudo secure_path
-  ansible.builtin.lineinfile:
-    line: Defaults secure_path = {{ secure_path[ansible_os_family] }}
-    regexp: Defaults(\s)*secure_path(\s)*=
+  lineinfile:
+    line: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'
+    regexp: "Defaults(\\s)*secure_path(\\s)*="
     state: present
     insertafter: EOF
     path: /etc/sudoers
-    validate: visudo -cf %s
+    validate: 'visudo -cf %s'
   when: ansible_os_family in [ "RedHat", "Suse" ]
@@ -2,12 +2,12 @@
 - name: Reboot containers
   block:
     - name: Get container ids from filtered files
-      ansible.builtin.set_fact:
+      set_fact:
         proxmox_lxc_filtered_ids: >-
           {{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
       listen: reboot containers
     - name: Reboot container
-      ansible.builtin.command: pct reboot {{ item }}
+      command: "pct reboot {{ item }}"
       loop: "{{ proxmox_lxc_filtered_ids }}"
       changed_when: true
       listen: reboot containers
@@ -1,9 +0,0 @@
---
argument_specs:
  main:
    short_description: Proxmox LXC settings
    options:
      proxmox_lxc_ct_ids:
        description: Proxmox container ID list
        type: list
        required: true
@@ -1,43 +1,44 @@
 ---
 - name: Check for container files that exist on this host
-  ansible.builtin.stat:
-    path: /etc/pve/lxc/{{ item }}.conf
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
   loop: "{{ proxmox_lxc_ct_ids }}"
   register: stat_results
 - name: Filter out files that do not exist
-  ansible.builtin.set_fact:
-    proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}' # noqa yaml[line-length]
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
 # https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
 - name: Ensure lxc config has the right apparmor profile
-  ansible.builtin.lineinfile:
+  lineinfile:
     dest: "{{ item }}"
-    regexp: ^lxc.apparmor.profile
+    regexp: "^lxc.apparmor.profile"
     line: "lxc.apparmor.profile: unconfined"
   loop: "{{ proxmox_lxc_filtered_files }}"
   notify: reboot containers
 - name: Ensure lxc config has the right cgroup
-  ansible.builtin.lineinfile:
+  lineinfile:
     dest: "{{ item }}"
-    regexp: ^lxc.cgroup.devices.allow
+    regexp: "^lxc.cgroup.devices.allow"
     line: "lxc.cgroup.devices.allow: a"
   loop: "{{ proxmox_lxc_filtered_files }}"
   notify: reboot containers
 - name: Ensure lxc config has the right cap drop
-  ansible.builtin.lineinfile:
+  lineinfile:
     dest: "{{ item }}"
-    regexp: ^lxc.cap.drop
+    regexp: "^lxc.cap.drop"
     line: "lxc.cap.drop: "
   loop: "{{ proxmox_lxc_filtered_files }}"
   notify: reboot containers
 - name: Ensure lxc config has the right mounts
-  ansible.builtin.lineinfile:
+  lineinfile:
     dest: "{{ item }}"
-    regexp: ^lxc.mount.auto
+    regexp: "^lxc.mount.auto"
     line: 'lxc.mount.auto: "proc:rw sys:rw"'
   loop: "{{ proxmox_lxc_filtered_files }}"
   notify: reboot containers
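After these four lineinfile tasks run, a matching container config such as /etc/pve/lxc/250.conf (the ID is an example) carries exactly these lines, which relax confinement enough for k3s to run inside the container:

lxc.apparmor.profile: unconfined
lxc.cgroup.devices.allow: a
lxc.cap.drop:
lxc.mount.auto: "proc:rw sys:rw"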
@@ -1,5 +1,4 @@
 ---
 - name: Reboot
-  ansible.builtin.reboot:
-    reboot_command: "{{ custom_reboot_command | default(omit) }}"
+  reboot:
   listen: reboot
@@ -1,10 +0,0 @@
---
argument_specs:
  main:
    short_description: Adjust some Raspberry Pi specific requisites
    options:
      state:
        default: present
        description:
          - Indicates whether the k3s prerequisites for Raspberry Pi should be
          - set up (possible values are `present` and `absent`)
@@ -1,51 +1,54 @@
 ---
 - name: Test for raspberry pi /proc/cpuinfo
-  ansible.builtin.command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo
+  command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo
   register: grep_cpuinfo_raspberrypi
   failed_when: false
   changed_when: false
 - name: Test for raspberry pi /proc/device-tree/model
-  ansible.builtin.command: grep -E "Raspberry Pi" /proc/device-tree/model
+  command: grep -E "Raspberry Pi" /proc/device-tree/model
   register: grep_device_tree_model_raspberrypi
   failed_when: false
   changed_when: false
 - name: Set raspberry_pi fact to true
-  ansible.builtin.set_fact:
+  set_fact:
     raspberry_pi: true
-  when: grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
+  when:
+    grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
-- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm)
-  ansible.builtin.set_fact:
-    detected_distribution: Raspbian
-  vars:
-    allowed_descriptions:
-      - "[Rr]aspbian.*"
-      - Debian.*buster
-      - Debian.*bullseye
-      - Debian.*bookworm
-  when:
-    - ansible_facts.architecture is search("aarch64")
-    - raspberry_pi|default(false)
-    - ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))
-- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm)
-  ansible.builtin.set_fact:
-    detected_distribution: Raspbian
-  when:
-    - ansible_facts.architecture is search("aarch64")
-    - raspberry_pi|default(false)
-    - ansible_facts.lsb.description|default("") is match("Debian.*bookworm")
+- name: Set detected_distribution to Raspbian
+  set_fact:
+    detected_distribution: Raspbian
+  when: >
+    raspberry_pi|default(false) and
+    ( ansible_facts.lsb.id|default("") == "Raspbian" or
+    ansible_facts.lsb.description|default("") is match("[Rr]aspbian.*") )
+- name: Set detected_distribution to Raspbian (ARM64 on Debian Buster)
+  set_fact:
+    detected_distribution: Raspbian
+  when:
+    - ansible_facts.architecture is search("aarch64")
+    - raspberry_pi|default(false)
+    - ansible_facts.lsb.description|default("") is match("Debian.*buster")
 - name: Set detected_distribution_major_version
-  ansible.builtin.set_fact:
+  set_fact:
     detected_distribution_major_version: "{{ ansible_facts.lsb.major_release }}"
   when:
     - detected_distribution | default("") == "Raspbian"
+- name: Set detected_distribution to Raspbian (ARM64 on Debian Bullseye)
+  set_fact:
+    detected_distribution: Raspbian
+  when:
+    - ansible_facts.architecture is search("aarch64")
+    - raspberry_pi|default(false)
+    - ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
 - name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
-  ansible.builtin.include_tasks: "{{ item }}"
+  include_tasks: "{{ item }}"
   with_first_found:
     - "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
     - "{{ action_ }}/{{ detected_distribution }}.yml"
@@ -1,39 +1,19 @@
 ---
-- name: Test for cmdline path
-  ansible.builtin.stat:
-    path: /boot/firmware/cmdline.txt
-  register: boot_cmdline_path
-  failed_when: false
-  changed_when: false
-- name: Set cmdline path based on Debian version and command result
-  ansible.builtin.set_fact:
-    cmdline_path: >-
-      {{
-        (
-          boot_cmdline_path.stat.exists and
-          ansible_facts.lsb.description | default('') is match('Debian.*(?!(bookworm|sid))')
-        ) | ternary(
-          '/boot/firmware/cmdline.txt',
-          '/boot/cmdline.txt'
-        )
-      }}
 - name: Activating cgroup support
-  ansible.builtin.lineinfile:
-    path: "{{ cmdline_path }}"
-    regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
-    line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
+  lineinfile:
+    path: /boot/cmdline.txt
+    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
+    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
     backrefs: true
   notify: reboot
 - name: Install iptables
-  ansible.builtin.apt:
+  apt:
     name: iptables
     state: present
 - name: Flush iptables before changing to iptables-legacy
-  ansible.builtin.iptables:
+  iptables:
     flush: true
 - name: Changing to iptables-legacy
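The negative-lookahead regexp plus backrefs: true appends the cgroup flags exactly once and is a no-op on later runs. With made-up boot arguments, the edit looks like:

# /boot/cmdline.txt before (one line; values are examples):
#   console=serial0,115200 root=PARTUUID=0000 rootfstype=ext4 rootwait
# after:
#   console=serial0,115200 root=PARTUUID=0000 rootfstype=ext4 rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory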
@@ -1,9 +1,9 @@
 ---
 - name: Enable cgroup via boot commandline if not already enabled for Rocky
-  ansible.builtin.lineinfile:
+  lineinfile:
     path: /boot/cmdline.txt
-    backrefs: true
-    regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
-    line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
+    backrefs: yes
+    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
+    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
   notify: reboot
   when: not ansible_check_mode
@@ -1,14 +1,13 @@
 ---
 - name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi
-  ansible.builtin.lineinfile:
+  lineinfile:
     path: /boot/firmware/cmdline.txt
-    backrefs: true
-    regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
-    line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
+    backrefs: yes
+    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
+    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
   notify: reboot
 - name: Install linux-modules-extra-raspi
-  ansible.builtin.apt:
+  apt:
     name: linux-modules-extra-raspi
     state: present
-  when: ansible_distribution_version is version('24.04', '<')
@@ -1,6 +1,5 @@
 ---
 - name: Remove linux-modules-extra-raspi
-  ansible.builtin.apt:
+  apt:
     name: linux-modules-extra-raspi
     state: absent
-  when: ansible_distribution_version is version('24.04', '<')
@@ -1,2 +0,0 @@
---
systemd_dir: /etc/systemd/system
@@ -1,8 +0,0 @@
---
argument_specs:
  main:
    short_description: Reset all nodes
    options:
      systemd_dir:
        description: Path to systemd services
        default: /etc/systemd/system
@@ -1,9 +1,9 @@
 ---
 - name: Disable services
-  ansible.builtin.systemd:
+  systemd:
     name: "{{ item }}"
     state: stopped
-    enabled: false
+    enabled: no
   failed_when: false
   with_items:
     - k3s
@@ -12,12 +12,12 @@
 - name: RUN pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
   register: pkill_containerd_shim_runc
-  ansible.builtin.command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
-  changed_when: pkill_containerd_shim_runc.rc == 0
+  command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
+  changed_when: "pkill_containerd_shim_runc.rc == 0"
   failed_when: false
 - name: Umount k3s filesystems
-  ansible.builtin.include_tasks: umount_with_children.yml
+  include_tasks: umount_with_children.yml
   with_items:
     - /run/k3s
     - /var/lib/kubelet
@@ -30,7 +30,7 @@
     loop_var: mounted_fs
 - name: Remove service files, binaries and data
-  ansible.builtin.file:
+  file:
     name: "{{ item }}"
     state: absent
   with_items:
@@ -45,36 +45,33 @@
     - /var/lib/rancher/k3s
     - /var/lib/rancher/
     - /var/lib/cni/
-    - /etc/cni/net.d
 - name: Remove K3s http_proxy files
-  ansible.builtin.file:
+  file:
     name: "{{ item }}"
     state: absent
   with_items:
-    - "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
     - "{{ systemd_dir }}/k3s.service.d"
-    - "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
     - "{{ systemd_dir }}/k3s-node.service.d"
   when: proxy_env is defined
 - name: Reload daemon_reload
-  ansible.builtin.systemd:
-    daemon_reload: true
+  systemd:
+    daemon_reload: yes
 - name: Remove tmp directory used for manifests
-  ansible.builtin.file:
+  file:
     path: /tmp/k3s
     state: absent
 - name: Check if rc.local exists
-  ansible.builtin.stat:
+  stat:
     path: /etc/rc.local
   register: rcfile
 - name: Remove rc.local modifications for proxmox lxc containers
   become: true
-  ansible.builtin.blockinfile:
+  blockinfile:
     path: /etc/rc.local
     content: "{{ lookup('template', 'templates/rc.local.j2') }}"
     create: false
@@ -83,14 +80,14 @@
 - name: Check rc.local for cleanup
   become: true
-  ansible.builtin.slurp:
+  slurp:
     src: /etc/rc.local
   register: rcslurp
   when: proxmox_lxc_configure and rcfile.stat.exists
 - name: Cleanup rc.local if we only have a Shebang line
   become: true
-  ansible.builtin.file:
+  file:
     path: /etc/rc.local
     state: absent
   when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1
@@ -1,6 +1,6 @@
 ---
 - name: Get the list of mounted filesystems
-  ansible.builtin.shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}"
+  shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}"
   register: get_mounted_filesystems
   args:
     executable: /bin/bash
@@ -12,4 +12,5 @@
   ansible.posix.mount:
     path: "{{ item }}"
     state: unmounted
-  with_items: "{{ get_mounted_filesystems.stdout_lines | reverse | list }}"
+  with_items:
+    "{{ get_mounted_filesystems.stdout_lines | reverse | list }}"
@@ -1,9 +0,0 @@
---
argument_specs:
  main:
    short_description: Proxmox LXC settings
    options:
      proxmox_lxc_ct_ids:
        description: Proxmox container ID list
        type: list
        required: true
@@ -1,45 +1,46 @@
 ---
 - name: Check for container files that exist on this host
-  ansible.builtin.stat:
-    path: /etc/pve/lxc/{{ item }}.conf
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
   loop: "{{ proxmox_lxc_ct_ids }}"
   register: stat_results
 - name: Filter out files that do not exist
-  ansible.builtin.set_fact:
-    proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}' # noqa yaml[line-length]
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
 - name: Remove LXC apparmor profile
-  ansible.builtin.lineinfile:
+  lineinfile:
     dest: "{{ item }}"
-    regexp: ^lxc.apparmor.profile
+    regexp: "^lxc.apparmor.profile"
     line: "lxc.apparmor.profile: unconfined"
     state: absent
   loop: "{{ proxmox_lxc_filtered_files }}"
   notify: reboot containers
 - name: Remove lxc cgroups
-  ansible.builtin.lineinfile:
+  lineinfile:
     dest: "{{ item }}"
-    regexp: ^lxc.cgroup.devices.allow
+    regexp: "^lxc.cgroup.devices.allow"
     line: "lxc.cgroup.devices.allow: a"
     state: absent
   loop: "{{ proxmox_lxc_filtered_files }}"
   notify: reboot containers
 - name: Remove lxc cap drop
-  ansible.builtin.lineinfile:
+  lineinfile:
     dest: "{{ item }}"
-    regexp: ^lxc.cap.drop
+    regexp: "^lxc.cap.drop"
     line: "lxc.cap.drop: "
     state: absent
   loop: "{{ proxmox_lxc_filtered_files }}"
   notify: reboot containers
 - name: Remove lxc mounts
-  ansible.builtin.lineinfile:
+  lineinfile:
     dest: "{{ item }}"
-    regexp: ^lxc.mount.auto
+    regexp: "^lxc.mount.auto"
     line: 'lxc.mount.auto: "proc:rw sys:rw"'
     state: absent
   loop: "{{ proxmox_lxc_filtered_files }}"
@@ -1,17 +1,8 @@
 ---
-- name: Pre tasks
-  hosts: all
-  pre_tasks:
-    - name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible)
-      ansible.builtin.assert:
-        that: ansible_version.full is version_compare('2.11', '>=')
-        msg: >
-          "Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/"
 - name: Prepare Proxmox cluster
   hosts: proxmox
   gather_facts: true
-  become: true
+  become: yes
   environment: "{{ proxy_env | default({}) }}"
   roles:
     - role: proxmox_lxc
@@ -19,7 +10,7 @@
 - name: Prepare k3s nodes
   hosts: k3s_cluster
-  gather_facts: true
+  gather_facts: yes
   environment: "{{ proxy_env | default({}) }}"
   roles:
     - role: lxc
@@ -55,14 +46,3 @@
   roles:
     - role: k3s_server_post
       become: true
-- name: Storing kubeconfig in the playbook directory
-  hosts: master
-  environment: "{{ proxy_env | default({}) }}"
-  tasks:
-    - name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
-      ansible.builtin.fetch:
-        src: "{{ ansible_user_dir }}/.kube/config"
-        dest: ./kubeconfig
-        flat: true
-      when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
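The play above fetches the cluster kubeconfig from the first master to ./kubeconfig next to the playbook. A quick local smoke test of that file might look like this (an illustrative sketch; assumes kubectl is installed on the control host):

- hosts: localhost
  gather_facts: false
  tasks:
    - name: List cluster nodes with the fetched kubeconfig
      ansible.builtin.command: kubectl --kubeconfig ./kubeconfig get nodes
      changed_when: false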