mirror of https://github.com/techno-tim/k3s-ansible.git
synced 2025-12-26 18:52:57 +01:00

Compare commits: v1.27.9+k3 ... be01e1f9ef (97 commits)
Commits in this range:

be01e1f9ef d99f6a96f2 bd872aecb8 fab302fd91 eddbcbfb76 03ae8de0d5
d136fa4486 b906cfbf72 2c04f38e2c 3435f43748 924a2f528c 2892ac3858
df8e8dd591 3a0303d130 b077a49e1f 635f0b21b3 4a64ad42df d0537736de
2149827800 2d0596209e 3a20500f9c 9ce9fecc5b 668d7fb896 6cee0e9051
6823ad51d5 1a521ea0d9 e48bb6df26 36893c27fb e8cd10d49b b86156b995
072f1a321d 2f46a54240 bf0418d77f d88eb80df0 f50d335451 d6597150c7
353f7ab641 c7c727c3dc 0422bfa2ac 0333406725 f4a19d368b 02d212c007
80095250e9 4fe2c92795 b3f2a4addc cb03ee829e 9e2e82faeb 7c1f6cbe42
604eb7a6e6 a204ed5169 b6608ca3e4 8252a45dfd c99f098c2e 7867b87d85
dfe19f3731 a46d97a28d dc9d571f17 6742551e5c fb3478a086 518c5bb62a
3f5d8dfe9f efbfadcb93 f81ec04ba2 8432d3bc66 14ae9df1bc f175716339
955c6f6b4a 3b74985767 9ace193ade 83a0be3afd 029eba6102 0c8253b3a5
326b71dfa2 b95d6dd2cc e4146b4ca9 1fb10faf7f ea3b3c776a 5beca87783
6ffc25dfe5 bcd37a6904 8dd3ffc825 f6ba208b5c a22d8f7aaf 05fb6b566d
3aeb7d69ea 61bf3971ef 3f06a11c8d 3888a29bb1 98ef696f31 de26a79a4c
ab7ca9b551 c5f71c9e2e 0f23e7e258 121061d875 db53f595fd 7b6b24ce4d
a5728da35e
.ansible-lint

@@ -1,20 +1,21 @@
 ---
 profile: production

 exclude_paths:
   # default paths
-  - '.cache/'
-  - '.github/'
-  - 'test/fixtures/formatting-before/'
-  - 'test/fixtures/formatting-prettier/'
+  - .cache/
+  - .github/
+  - test/fixtures/formatting-before/
+  - test/fixtures/formatting-prettier/

   # The "converge" and "reset" playbooks use import_playbook in
   # conjunction with the "env" lookup plugin, which lets the
   # syntax check of ansible-lint fail.
-  - 'molecule/**/converge.yml'
-  - 'molecule/**/prepare.yml'
-  - 'molecule/**/reset.yml'
+  - molecule/**/converge.yml
+  - molecule/**/prepare.yml
+  - molecule/**/reset.yml

   # The file was generated by galaxy ansible - don't mess with it.
-  - 'galaxy.yml'
+  - galaxy.yml

 skip_list:
   - 'fqcn-builtins'
+  - var-naming[no-role-prefix]
8  .github/ISSUE_TEMPLATE.md  (vendored)

@@ -37,6 +37,11 @@ systemd_dir: ""
 
 flannel_iface: ""
 
+#calico_iface: ""
+calico_ebpf: ""
+calico_cidr: ""
+calico_tag: ""
+
 apiserver_endpoint: ""
 
 k3s_token: "NA"

@@ -46,6 +51,9 @@ extra_agent_args: ""
 
 kube_vip_tag_version: ""
 
+kube_vip_cloud_provider_tag_version: ""
+kube_vip_lb_ip_range: ""
+
 metal_lb_speaker_tag_version: ""
 metal_lb_controller_tag_version: ""
17  .github/download-boxes.sh  (vendored)

@@ -9,12 +9,17 @@ set -euo pipefail
 GIT_ROOT=$(git rev-parse --show-toplevel)
 PROVIDER=virtualbox
 
-# Read all boxes for all platforms from the "molecule.yml" files
-all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
-    yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
-    grep --invert-match --regexp=--- | # Filter out file separators
-    sort |
-    uniq)
+yq --version
+
+# Define the path to the molecule.yml files
+MOLECULE_YML_PATH="${GIT_ROOT}/molecule/*/molecule.yml"
+
+# Extract and sort unique boxes from all molecule.yml files
+all_boxes=$(for file in $MOLECULE_YML_PATH; do
+    yq eval '.platforms[].box' "$file"
+done | sort -u)
+
+echo all_boxes: "$all_boxes"
 
 # Read the boxes that are currently present on the system (for the current provider)
 present_boxes=$(
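The rewritten loop reads each scenario file on its own instead of concatenating them, which sidesteps yq's handling of multi-document streams. A minimal sketch of the same idea, assuming mikefarah yq v4 and the repository root as the working directory:

```bash
# List the unique Vagrant boxes referenced by all molecule scenarios,
# then print the ones not yet present locally.
for f in molecule/*/molecule.yml; do
  yq eval '.platforms[].box' "$f"
done | sort -u > /tmp/wanted_boxes

vagrant box list | cut -f 1 -d ' ' | sort -u > /tmp/present_boxes
comm -23 /tmp/wanted_boxes /tmp/present_boxes  # boxes still to download
```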
6  .github/workflows/cache.yml  (vendored)

@@ -11,19 +11,19 @@ jobs:
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # 4.2.0
         with:
           ref: ${{ github.event.pull_request.head.sha }}
 
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # 5.2.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
 
       - name: Cache Vagrant boxes
         id: cache-vagrant
-        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
+        uses: actions/cache@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # 4.1.0
         with:
           lookup-only: true #if it exists, we don't need to restore and can skip the next step
           path: |
17  .github/workflows/ci.yml  (vendored)

@@ -2,11 +2,20 @@
 name: "CI"
 on:
   pull_request:
+  push:
+    branches:
+      - master
   types:
     - opened
     - synchronize
   paths-ignore:
     - '**/README.md'
+    - '**/.gitignore'
+    - '**/FUNDING.yml'
+    - '**/host.ini'
+    - '**/*.md'
+    - '**/.editorconfig'
+    - '**/ansible.example.cfg'
+    - '**/deploy.sh'
+    - '**/LICENSE'
+    - '**/reboot.sh'
+    - '**/reset.sh'
 jobs:
   pre:
     uses: ./.github/workflows/cache.yml
12  .github/workflows/lint.yml  (vendored)

@@ -11,18 +11,18 @@ jobs:
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # 4.2.0
         with:
           ref: ${{ github.event.pull_request.head.sha }}
 
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # 5.2.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
 
       - name: Restore Ansible cache
-        uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
+        uses: actions/cache/restore@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # 4.1.0
         with:
           path: ~/.ansible/collections
           key: ansible-${{ hashFiles('collections/requirements.yml') }}

@@ -38,16 +38,16 @@ jobs:
           echo "::endgroup::"
 
       - name: Run pre-commit
-        uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
+        uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # 3.0.1
 
   ensure-pinned-actions:
     name: Ensure SHA Pinned Actions
     runs-on: self-hosted
     steps:
       - name: Checkout code
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # 4.2.0
       - name: Ensure SHA pinned actions
-        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@40ba2d51b6b6d8695f2b6bd74e785172d4f8d00f # 3.0.14
         with:
           allowlist: |
             aws-actions/
43  .github/workflows/test.yml  (vendored)

@@ -10,18 +10,46 @@ jobs:
       matrix:
         scenario:
           - default
-          - ipv6
+          # - ipv6
           - single_node
+          - calico
+          - cilium
+          - kube-vip
       fail-fast: false
     env:
       PYTHON_VERSION: "3.11"
 
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # 4.2.0
         with:
           ref: ${{ github.event.pull_request.head.sha }}
 
+      # these steps are necessary if not using ephemeral nodes
+      - name: Delete old Vagrant box versions
+        if: always() # do this even if a step before has failed
+        run: vagrant box prune --force
+
+      - name: Remove all local Vagrant boxes
+        if: always() # do this even if a step before has failed
+        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
+
+      - name: Remove all Virtualbox VMs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
+
+      - name: Remove all Virtualbox HDs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
+
+      - name: Remove all Virtualbox Networks
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
+
+      - name: Remove Virtualbox network config
+        if: always() # do this even if a step before has failed
+        run: sudo rm /etc/vbox/networks.conf || true
+
       - name: Configure VirtualBox
         run: |-
           sudo mkdir -p /etc/vbox

@@ -31,13 +59,13 @@ jobs:
           EOF
 
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # 5.2.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
 
       - name: Restore vagrant Boxes cache
-        uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
+        uses: actions/cache/restore@2cdf405574d6ef1f33a1d12acccd3ae82f47b3f2 # 4.1.0
         with:
           path: ~/.vagrant.d/boxes
           key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}

@@ -70,7 +98,7 @@ jobs:
 
       - name: Remove all local Vagrant boxes
         if: always() # do this even if a step before has failed
-        run: vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f
+        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
 
       - name: Remove all Virtualbox VMs
         if: always() # do this even if a step before has failed

@@ -86,12 +114,13 @@ jobs:
 
       - name: Remove Virtualbox network config
         if: always() # do this even if a step before has failed
-        run: sudo rm /etc/vbox/networks.conf
+        run: sudo rm /etc/vbox/networks.conf || true
 
       - name: Upload log files
         if: always() # do this even if a step before has failed
-        uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # 4.4.0
         with:
           name: logs
           path: |
             ${{ runner.temp }}/logs
+          overwrite: true
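The guarded one-liner used in the cleanup steps is dense; the same logic reads more easily spread out. A sketch, assuming Vagrant is on the PATH (the workflow inlines this as a single `run:` string):

```bash
# Remove every local Vagrant box, but don't fail the job when none exist.
if vagrant box list 2>/dev/null; then
  vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null \
    && echo "All Vagrant boxes removed." \
    || echo "No Vagrant boxes found."
else
  echo "No Vagrant boxes found."
fi
```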
1  .gitignore  (vendored)

@@ -1,3 +1,4 @@
 .env/
 *.log
 ansible.cfg
+kubeconfig
.pre-commit-config.yaml

@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
+    rev: v4.5.0
     hooks:
       - id: requirements-txt-fixer
       - id: sort-simple-yaml

@@ -12,24 +12,24 @@ repos:
       - id: trailing-whitespace
         args: [--markdown-linebreak-ext=md]
   - repo: https://github.com/adrienverge/yamllint.git
-    rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
+    rev: v1.33.0
     hooks:
       - id: yamllint
         args: [-c=.yamllint]
   - repo: https://github.com/ansible-community/ansible-lint.git
-    rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
+    rev: v6.22.2
     hooks:
       - id: ansible-lint
   - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
+    rev: v0.9.0.6
     hooks:
       - id: shellcheck
   - repo: https://github.com/Lucas-C/pre-commit-hooks
-    rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
+    rev: v1.5.4
     hooks:
       - id: remove-crlf
       - id: remove-tabs
   - repo: https://github.com/sirosen/texthooks
-    rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
+    rev: 0.6.4
     hooks:
       - id: fix-smartquotes
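With the hooks now pinned to plain version tags instead of frozen commit SHAs, routine maintenance becomes a two-step affair. A sketch, assuming pre-commit is installed:

```bash
pre-commit autoupdate     # bump every hook's rev to its latest tag
pre-commit run --all-files  # re-lint the whole tree against the new revs
# `pre-commit autoupdate --freeze` would pin back to commit SHAs instead.
```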
11  .yamllint

@@ -2,10 +2,19 @@
 extends: default
 
 rules:
+  comments:
+    min-spaces-from-content: 1
+  comments-indentation: false
+  braces:
+    max-spaces-inside: 1
+  octal-values:
+    forbid-implicit-octal: true
+    forbid-explicit-octal: true
   line-length:
     max: 120
     level: warning
   truthy:
-    allowed-values: ['true', 'false']
+    allowed-values: ["true", "false"]
+
+ignore:
+  - galaxy.yml
16  README.md

@@ -96,8 +96,22 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
 To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:
 
 ```bash
-scp debian@master_ip:~/.kube/config ~/.kube/config
+scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
 ```
+If you get a file permission denied error, go into the node and temporarily run:
+```bash
+sudo chmod 777 /etc/rancher/k3s/k3s.yaml
+```
+Then copy the file with the scp command and reset the permissions back with:
+```bash
+sudo chmod 600 /etc/rancher/k3s/k3s.yaml
+```
+
+You'll then want to modify the config to point to the master IP by running:
+```bash
+sudo nano ~/.kube/config
+```
+Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
 
 ### 🔨 Testing your cluster
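The chmod 777/600 dance above works, but the same result is possible without ever loosening permissions on the node. A sketch, assuming passwordless sudo over ssh and GNU sed; the user, host, and master IP are placeholders:

```bash
# Pull the kubeconfig via sudo and point it at the master's address.
ssh debian@master_ip 'sudo cat /etc/rancher/k3s/k3s.yaml' > ~/.kube/config
chmod 600 ~/.kube/config
sed -i 's|https://127.0.0.1:6443|https://192.168.1.222:6443|' ~/.kube/config
```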
inventory/sample/group_vars/all.yml

@@ -1,53 +1,103 @@
 ---
-k3s_version: v1.27.9+k3s1
+k3s_version: v1.30.2+k3s2
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
 
 # Set your timezone
-system_timezone: "Your/Timezone"
+system_timezone: Your/Timezone
 
 # interface which will be used for flannel
-flannel_iface: "eth0"
+flannel_iface: eth0
 
+# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
+# calico_iface: "eth0"
+calico_ebpf: false # use eBPF dataplane instead of iptables
+calico_tag: v3.28.0 # calico version tag
+
+# uncomment cilium_iface to use cilium cni instead of flannel or calico
+# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
+# cilium_iface: "eth0"
+cilium_mode: native # native when nodes on same subnet or using bgp, else set routed
+cilium_tag: v1.16.0 # cilium version tag
+cilium_hubble: true # enable hubble observability relay and ui
+
+# if using calico or cilium, you may specify the cluster pod cidr pool
+cluster_cidr: 10.52.0.0/16
+
+# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
+cilium_bgp: false
+
+# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
+cilium_bgp_my_asn: "64513"
+cilium_bgp_peer_asn: "64512"
+cilium_bgp_peer_address: 192.168.30.1
+cilium_bgp_lb_cidr: 192.168.31.0/24 # cidr for cilium loadbalancer ipam
+
+# enable kube-vip ARP broadcasts
+kube_vip_arp: true
+
+# enable kube-vip BGP peering
+kube_vip_bgp: false
+
+# bgp parameters for kube-vip
+kube_vip_bgp_routerid: "127.0.0.1" # Defines the router ID for the BGP server
+kube_vip_bgp_as: "64513" # Defines the AS for the BGP server
+kube_vip_bgp_peeraddress: "192.168.30.1" # Defines the address for the BGP peer
+kube_vip_bgp_peeras: "64512" # Defines the AS for the BGP peer
+
 # apiserver_endpoint is virtual ip-address which will be configured on each master
-apiserver_endpoint: "192.168.30.222"
+apiserver_endpoint: 192.168.30.222
 
 # k3s_token is required  masters can talk together securely
 # this token should be alpha numeric only
-k3s_token: "some-SUPER-DEDEUPER-secret-password"
+k3s_token: some-SUPER-DEDEUPER-secret-password
 
 # The IP on which the node is reachable in the cluster.
 # Here, a sensible default is provided, you can still override
 # it for each of your hosts, though.
-k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
+k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}"
 
 # Disable the taint manually by setting: k3s_master_taint = false
 k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
 
 # these arguments are recommended for servers as well as agents:
 extra_args: >-
-  --flannel-iface={{ flannel_iface }}
+  {{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
   --node-ip={{ k3s_node_ip }}
 
 # change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
+# the contents of the if block is also required if using calico or cilium
 extra_server_args: >-
   {{ extra_args }}
   {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
+  {% if calico_iface is defined or cilium_iface is defined %}
+  --flannel-backend=none
+  --disable-network-policy
+  --cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
+  {% endif %}
   --tls-san {{ apiserver_endpoint }}
   --disable servicelb
   --disable traefik
 
 extra_agent_args: >-
   {{ extra_args }}
 
 # image tag for kube-vip
-kube_vip_tag_version: "v0.5.12"
+kube_vip_tag_version: v0.8.2
 
 # tag for kube-vip-cloud-provider manifest
+# kube_vip_cloud_provider_tag_version: "main"
 
 # kube-vip ip range for load balancer
+# (uncomment to use kube-vip for services instead of MetalLB)
 # kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
 
 # metallb type frr or native
-metal_lb_type: "native"
+metal_lb_type: native
 
 # metallb mode layer2 or bgp
-metal_lb_mode: "layer2"
+metal_lb_mode: layer2
 
 # bgp options
 # metal_lb_bgp_my_asn: "64513"

@@ -55,11 +105,11 @@ metal_lb_mode: "layer2"
 # metal_lb_bgp_peer_address: "192.168.30.1"
 
 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.13.9"
-metal_lb_controller_tag_version: "v0.13.9"
+metal_lb_speaker_tag_version: v0.14.8
+metal_lb_controller_tag_version: v0.14.8
 
 # metallb ip range for load balancer
-metal_lb_ip_range: "192.168.30.80-192.168.30.90"
+metal_lb_ip_range: 192.168.30.80-192.168.30.90
 
 # Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
 # in your hosts.ini file.

@@ -122,6 +172,10 @@ custom_registries_yaml: |
       username: yourusername
       password: yourpassword
 
+# On some distros like Diet Pi, there is no dbus installed. dbus required by the default reboot command.
+# Uncomment if you need a custom reboot command
+# custom_reboot_command: /usr/sbin/shutdown -r now
+
 # Only enable and configure these if you access the internet through a proxy
 # proxy_env:
 #   HTTP_PROXY: "http://proxy.domain.local:3128"
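Since k3s_node_ip now resolves through whichever of cilium_iface, calico_iface, or flannel_iface is defined, it is worth confirming that the chosen interface actually exists on every host before deploying. A quick fact probe, assuming the sample inventory path:

```bash
# Show the facts Ansible gathers for the interface named in flannel_iface
# (swap eth0 for your cilium/calico interface as appropriate).
ansible -i inventory/sample/hosts.ini all \
  -m ansible.builtin.setup -a 'filter=ansible_eth0'
```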
@@ -1,2 +1,2 @@
 ---
-ansible_user: '{{ proxmox_lxc_ssh_user }}'
+ansible_user: "{{ proxmox_lxc_ssh_user }}"
@@ -13,6 +13,12 @@ We have these scenarios:
   To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
 - **single_node**:
   Very similar to the default scenario, but uses only a single node for all cluster functionality.
+- **calico**:
+  The same as single node, but uses calico cni instead of flannel.
+- **cilium**:
+  The same as single node, but uses cilium cni instead of flannel.
+- **kube-vip**:
+  The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB.
 
 ## How to execute
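Each scenario above corresponds to a directory under molecule/, and running one locally follows the usual molecule flow. A sketch, assuming the pinned dev requirements plus Vagrant and VirtualBox are installed:

```bash
pip install -r requirements.txt       # molecule, molecule-plugins[vagrant], ...
molecule test --scenario-name calico  # create, converge, verify, reset, destroy
```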
49  molecule/calico/molecule.yml  (new file)

@@ -0,0 +1,49 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.62
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
16  molecule/calico/overrides.yml  (new file)

@@ -0,0 +1,16 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        calico_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: 192.168.30.224
        metal_lb_ip_range: 192.168.30.100-192.168.30.109
49  molecule/cilium/molecule.yml  (new file)

@@ -0,0 +1,49 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.63
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
16  molecule/cilium/overrides.yml  (new file)

@@ -0,0 +1,16 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        cilium_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: 192.168.30.225
        metal_lb_ip_range: 192.168.30.110-192.168.30.119
@@ -4,7 +4,6 @@ dependency:
 driver:
   name: vagrant
 platforms:
-
   - name: control1
     box: generic/ubuntu2204
     memory: 1024

@@ -18,8 +17,8 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
 
   - name: control2
     box: generic/debian12

@@ -56,8 +55,8 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
 
   - name: node2
     box: generic/rocky9

@@ -17,8 +17,8 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
 
   - name: control2
     box: generic/ubuntu2204

@@ -33,8 +33,8 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
 
   - name: node1
     box: generic/ubuntu2204

@@ -49,8 +49,8 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
 provisioner:
   name: ansible
   env:

@@ -38,7 +38,7 @@
       dest: /etc/netplan/55-flannel-ipv4.yaml
       owner: root
       group: root
-      mode: 0644
+      mode: "0644"
   register: netplan_template
 
 - name: Apply netplan configuration
49  molecule/kube-vip/molecule.yml  (new file)

@@ -0,0 +1,49 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.62
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
17  molecule/kube-vip/overrides.yml  (new file)

@@ -0,0 +1,17 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: 192.168.30.225
        # Use kube-vip instead of MetalLB
        kube_vip_lb_ip_range: 192.168.30.110-192.168.30.119
@@ -27,7 +27,7 @@
       name: nginx
       namespace: "{{ testing_namespace }}"
       kubeconfig: "{{ kubecfg_path }}"
-    vars: &load_balancer_metadata
+    vars:
       metallb_ip: status.loadBalancer.ingress[0].ip
       metallb_port: spec.ports[0].port
     register: nginx_services

@@ -43,10 +43,10 @@
         {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
       port_: >-
         {{ nginx_services.resources[0].spec.ports[0].port }}
-    # Deactivated linter rules:
-    #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
-    #     would be undefined. This will not be the case during playbook execution.
-    # noqa jinja[invalid]
+    # Deactivated linter rules:
+    #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
+    #     would be undefined. This will not be the case during playbook execution.
+    # noqa jinja[invalid]
 
   always:
     - name: "Remove namespace: {{ testing_namespace }}"

@@ -9,7 +9,7 @@
   ansible.builtin.assert:
     that: found_nodes == expected_nodes
     success_msg: "Found nodes as expected: {{ found_nodes }}"
-    fail_msg: "Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}"
+    fail_msg: Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}
   vars:
     found_nodes: >-
       {{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }}

@@ -22,7 +22,7 @@
       | unique
       | sort
     }}
-    # Deactivated linter rules:
+    # Deactivated linter rules:
     #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
     #     would be undefined. This will not be the case during playbook execution.
     # noqa jinja[invalid]

@@ -11,8 +11,8 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
     groups:
       - k3s_cluster
       - master

@@ -12,5 +12,5 @@
   retry_count: 45
 
   # Make sure that our IP ranges do not collide with those of the default scenario
-  apiserver_endpoint: "192.168.30.223"
-  metal_lb_ip_range: "192.168.30.91-192.168.30.99"
+  apiserver_endpoint: 192.168.30.223
+  metal_lb_ip_range: 192.168.30.91-192.168.30.99

@@ -5,5 +5,6 @@
   tasks:
     - name: Reboot the nodes (and Wait upto 5 mins max)
       become: true
-      reboot:
+      ansible.builtin.reboot:
+        reboot_command: "{{ custom_reboot_command | default(omit) }}"
         reboot_timeout: 300

@@ -6,7 +6,7 @@
 #
 ansible-compat==4.1.11
     # via molecule
-ansible-core==2.16.2
+ansible-core==2.17.4
     # via
     #   -r requirements.in
     #   ansible-compat

@@ -77,7 +77,7 @@ molecule==6.0.3
     # via
     #   -r requirements.in
     #   molecule-plugins
-molecule-plugins[vagrant]==23.5.0
+molecule-plugins[vagrant]==23.5.3
     # via -r requirements.in
 netaddr==0.10.1
     # via -r requirements.in

@@ -96,9 +96,9 @@ platformdirs==4.1.0
     # via virtualenv
 pluggy==1.3.0
     # via molecule
-pre-commit==3.6.0
+pre-commit==3.8.0
     # via -r requirements.in
-pre-commit-hooks==4.5.0
+pre-commit-hooks==4.6.0
     # via -r requirements.in
 pyasn1==0.5.1
     # via

@@ -114,7 +114,7 @@ python-dateutil==2.8.2
     # via kubernetes
 python-vagrant==1.0.0
     # via molecule-plugins
-pyyaml==6.0.1
+pyyaml==6.0.2
     # via
     #   -r requirements.in
     #   ansible-compat

@@ -7,11 +7,12 @@
       become: true
     - role: raspberrypi
       become: true
-      vars: {state: absent}
+      vars: { state: absent }
   post_tasks:
     - name: Reboot and wait for node to come back up
       become: true
-      reboot:
+      ansible.builtin.reboot:
+        reboot_command: "{{ custom_reboot_command | default(omit) }}"
         reboot_timeout: 3600
 
 - name: Revert changes to Proxmox cluster
8  roles/download/meta/main.yml  (new file)

@@ -0,0 +1,8 @@
---
argument_specs:
  main:
    short_description: Manage the downloading of K3S binaries
    options:
      k3s_version:
        description: The desired version of K3S
        required: true
@@ -1,36 +1,34 @@
 ---
 
 - name: Download k3s binary x64
-  get_url:
+  ansible.builtin.get_url:
     url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
     checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
     dest: /usr/local/bin/k3s
     owner: root
     group: root
-    mode: 0755
+    mode: "0755"
   when: ansible_facts.architecture == "x86_64"
 
 - name: Download k3s binary arm64
-  get_url:
+  ansible.builtin.get_url:
     url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64
     checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt
     dest: /usr/local/bin/k3s
     owner: root
     group: root
-    mode: 0755
+    mode: "0755"
   when:
-    - ( ansible_facts.architecture is search("arm") and
-      ansible_facts.userspace_bits == "64" ) or
-      ansible_facts.architecture is search("aarch64")
+    - ( ansible_facts.architecture is search("arm") and ansible_facts.userspace_bits == "64" )
+      or ansible_facts.architecture is search("aarch64")
 
 - name: Download k3s binary armhf
-  get_url:
+  ansible.builtin.get_url:
     url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf
     checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt
     dest: /usr/local/bin/k3s
     owner: root
     group: root
-    mode: 0755
+    mode: "0755"
   when:
     - ansible_facts.architecture is search("arm")
    - ansible_facts.userspace_bits == "32"
4  roles/k3s_agent/defaults/main.yml  (new file)

@@ -0,0 +1,4 @@
---
extra_agent_args: ""
group_name_master: master
systemd_dir: /etc/systemd/system
34  roles/k3s_agent/meta/main.yml  (new file)

@@ -0,0 +1,34 @@
---
argument_specs:
  main:
    short_description: Setup k3s agents
    options:
      apiserver_endpoint:
        description: Virtual ip-address configured on each master
        required: true

      extra_agent_args:
        description: Extra arguments for agents nodes

      group_name_master:
        description: Name of the master group
        default: master

      k3s_token:
        description: Token used to communicate between masters

      proxy_env:
        type: dict
        description: Internet proxy configurations
        default: ~
        options:
          HTTP_PROXY:
            required: true
          HTTPS_PROXY:
            required: true
          NO_PROXY:
            required: true

      systemd_dir:
        description: Path to systemd services
        default: /etc/systemd/system
@@ -1,18 +1,18 @@
 ---
 
 - name: Create k3s-node.service.d directory
-  file:
-    path: '{{ systemd_dir }}/k3s-node.service.d'
+  ansible.builtin.file:
+    path: "{{ systemd_dir }}/k3s-node.service.d"
     state: directory
     owner: root
     group: root
-    mode: '0755'
-
+    mode: "0755"
   when: proxy_env is defined
 
 - name: Copy K3s http_proxy conf file
-  template:
-    src: "http_proxy.conf.j2"
+  ansible.builtin.template:
+    src: http_proxy.conf.j2
     dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
     owner: root
     group: root
-    mode: '0755'
+    mode: "0755"
   when: proxy_env is defined
@@ -1,19 +1,35 @@
 ---
+- name: Check for PXE-booted system
+  block:
+    - name: Check if system is PXE-booted
+      ansible.builtin.command:
+        cmd: cat /proc/cmdline
+      register: boot_cmdline
+      changed_when: false
+      check_mode: false
+
+    - name: Set fact for PXE-booted system
+      ansible.builtin.set_fact:
+        is_pxe_booted: "{{ 'root=/dev/nfs' in boot_cmdline.stdout }}"
+      when: boot_cmdline.stdout is defined
+
+- name: Include http_proxy configuration tasks
+  ansible.builtin.include_tasks: http_proxy.yml
 
 - name: Deploy K3s http_proxy conf
-  include_tasks: http_proxy.yml
+  ansible.builtin.include_tasks: http_proxy.yml
   when: proxy_env is defined
 
-- name: Copy K3s service file
-  template:
-    src: "k3s.service.j2"
+- name: Configure the k3s service
+  ansible.builtin.template:
+    src: k3s.service.j2
     dest: "{{ systemd_dir }}/k3s-node.service"
     owner: root
     group: root
-    mode: 0755
+    mode: "0755"
 
-- name: Enable and check K3s service
-  systemd:
+- name: Manage k3s service
+  ansible.builtin.systemd:
     name: k3s-node
     daemon_reload: true
     state: restarted
@@ -7,11 +7,14 @@ After=network-online.target
 Type=notify
 ExecStartPre=-/sbin/modprobe br_netfilter
 ExecStartPre=-/sbin/modprobe overlay
-ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
+# Conditional snapshotter based on PXE boot status
+ExecStart=/usr/local/bin/k3s agent \
+    --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 \
+    {% if is_pxe_booted | default(false) %}--snapshotter native \
+    {% endif %}--token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} \
+    {{ extra_agent_args }}
 KillMode=process
 Delegate=yes
 # Having non-zero Limit*s causes performance problems due to accounting overhead
 # in the kernel. We recommend using cgroups to do container-local accounting.
 LimitNOFILE=1048576
 LimitNPROC=infinity
 LimitCORE=infinity
@@ -1,6 +0,0 @@  (file removed)
----
-# Indicates whether custom registries for k3s should be configured
-# Possible values:
-#   - present
-#   - absent
-state: present
20  roles/k3s_custom_registries/meta/main.yml  (new file)

@@ -0,0 +1,20 @@
---
argument_specs:
  main:
    short_description: Configure the use of a custom container registry
    options:
      custom_registries_yaml:
        description:
          - YAML block defining custom registries.
          - >
            The following is an example that pulls all images used in
            this playbook through your private registries.
          - >
            It also allows you to pull your own images from your private
            registry, without having to use imagePullSecrets in your
            deployments.
          - >
            If all you need is your own images and you don't care about
            caching the docker/quay/ghcr.io images, you can just remove
            those from the mirrors: section.
        required: true
@@ -1,17 +1,16 @@
 ---
 
 - name: Create directory /etc/rancher/k3s
-  file:
-    path: "/etc/{{ item }}"
+  ansible.builtin.file:
+    path: /etc/{{ item }}
     state: directory
-    mode: '0755'
+    mode: "0755"
   loop:
     - rancher
     - rancher/k3s
 
 - name: Insert registries into /etc/rancher/k3s/registries.yaml
-  blockinfile:
+  ansible.builtin.blockinfile:
     path: /etc/rancher/k3s/registries.yaml
     block: "{{ custom_registries_yaml }}"
-    mode: '0600'
+    mode: "0600"
     create: true
@@ -1,12 +1,27 @@
 ---
-# If you want to explicitly define an interface that ALL control nodes
-# should use to propagate the VIP, define it here. Otherwise, kube-vip
-# will determine the right interface automatically at runtime.
-kube_vip_iface: null
 extra_server_args: ""
 
+k3s_kubectl_binary: k3s kubectl
+
+# Name of the master group
+group_name_master: master
+
+kube_vip_arp: true
+kube_vip_iface:
+kube_vip_cloud_provider_tag_version: main
+kube_vip_tag_version: v0.7.2
+
+kube_vip_bgp: false
+kube_vip_bgp_routerid: 127.0.0.1
+kube_vip_bgp_as: "64513"
+kube_vip_bgp_peeraddress: 192.168.30.1
+kube_vip_bgp_peeras: "64512"
+
+metal_lb_controller_tag_version: v0.14.3
+metal_lb_speaker_tag_version: v0.14.3
+metal_lb_type: native
+
+retry_count: 20
+
 # yamllint disable rule:line-length
 server_init_args: >-
   {% if groups[group_name_master | default('master')] | length > 1 %}

@@ -17,4 +32,6 @@ server_init_args: >-
   {% endif %}
     --token {{ k3s_token }}
   {% endif %}
-  {{ extra_server_args | default('') }}
+  {{ extra_server_args }}
+
+systemd_dir: /etc/systemd/system
121  roles/k3s_server/meta/main.yml  (new file)

@@ -0,0 +1,121 @@
---
argument_specs:
  main:
    short_description: Setup k3s servers
    options:
      apiserver_endpoint:
        description: Virtual ip-address configured on each master
        required: true

      cilium_bgp:
        description:
          - Enable cilium BGP control plane for LB services and pod cidrs.
          - Disables the use of MetalLB.
        type: bool
        default: ~

      cilium_iface:
        description: The network interface used for when Cilium is enabled
        default: ~

      extra_server_args:
        description: Extra arguments for server nodes
        default: ""

      group_name_master:
        description: Name of the master group
        default: master

      k3s_create_kubectl_symlink:
        description: Create the kubectl -> k3s symlink
        default: false
        type: bool

      k3s_create_crictl_symlink:
        description: Create the crictl -> k3s symlink
        default: false
        type: bool

      kube_vip_arp:
        description: Enables kube-vip ARP broadcasts
        default: true
        type: bool

      kube_vip_bgp:
        description: Enables kube-vip BGP peering
        default: false
        type: bool

      kube_vip_bgp_routerid:
        description: Defines the router ID for the kube-vip BGP server
        default: "127.0.0.1"

      kube_vip_bgp_as:
        description: Defines the AS for the kube-vip BGP server
        default: "64513"

      kube_vip_bgp_peeraddress:
        description: Defines the address for the kube-vip BGP peer
        default: "192.168.30.1"

      kube_vip_bgp_peeras:
        description: Defines the AS for the kube-vip BGP peer
        default: "64512"

      kube_vip_iface:
        description:
          - Explicitly define an interface that ALL control nodes
          - should use to propagate the VIP, define it here.
          - Otherwise, kube-vip will determine the right interface
          - automatically at runtime.
        default: ~

      kube_vip_tag_version:
        description: Image tag for kube-vip
        default: v0.7.2

      kube_vip_cloud_provider_tag_version:
        description: Tag for kube-vip-cloud-provider manifest when enabled
        default: main

      kube_vip_lb_ip_range:
        description: IP range for kube-vip load balancer
        default: ~

      metal_lb_controller_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_speaker_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_type:
        choices:
          - frr
          - native
        default: native

      proxy_env:
        type: dict
        description: Internet proxy configurations
        default: ~
        options:
          HTTP_PROXY:
            required: true
          HTTPS_PROXY:
            required: true
          NO_PROXY:
            required: true

      retry_count:
        description: Amount of retries when verifying that nodes joined
        type: int
        default: 20

      server_init_args:
        description: Arguments for server nodes

      systemd_dir:
        description: Path to systemd services
        default: /etc/systemd/system
@@ -23,6 +23,6 @@
   ansible.builtin.template:
     src: content.j2
     dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
-    mode: 0644
+    mode: "0644"
   vars:
     content: "{{ k3s_init_log.stdout }}"
@@ -1,18 +1,16 @@
 ---
 
 - name: Create k3s.service.d directory
-  file:
-    path: '{{ systemd_dir }}/k3s.service.d'
+  ansible.builtin.file:
+    path: "{{ systemd_dir }}/k3s.service.d"
     state: directory
     owner: root
     group: root
-    mode: '0755'
-
+    mode: "0755"
 
 - name: Copy K3s http_proxy conf file
-  template:
-    src: "http_proxy.conf.j2"
+  ansible.builtin.template:
+    src: http_proxy.conf.j2
     dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
     owner: root
     group: root
-    mode: '0755'
+    mode: "0755"
27  roles/k3s_server/tasks/kube-vip.yml  (new file)

@@ -0,0 +1,27 @@
---
- name: Create manifests directory on first master
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Download vip cloud provider manifest to first master
  ansible.builtin.get_url:
    url: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml # noqa yaml[line-length]
    dest: /var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Copy kubevip configMap manifest to first master
  ansible.builtin.template:
    src: kubevip.yaml.j2
    dest: /var/lib/rancher/k3s/server/manifests/kubevip.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
@@ -1,166 +1,169 @@
|
||||
---
|
||||
|
||||
- name: Stop k3s-init
|
||||
systemd:
|
||||
ansible.builtin.systemd:
|
||||
name: k3s-init
|
||||
state: stopped
|
||||
failed_when: false
|
||||
|
||||
# k3s-init won't work if the port is already in use
|
||||
- name: Stop k3s
|
||||
systemd:
|
||||
ansible.builtin.systemd:
|
||||
name: k3s
|
||||
state: stopped
|
||||
failed_when: false
|
||||
|
||||
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
|
||||
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
|
||||
# The systemd module does not support "reset-failed", so we need to resort to command.
|
||||
command: systemctl reset-failed k3s-init
|
||||
ansible.builtin.command: systemctl reset-failed k3s-init
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
|
||||
- name: Deploy K3s http_proxy conf
|
||||
include_tasks: http_proxy.yml
|
||||
ansible.builtin.include_tasks: http_proxy.yml
|
||||
when: proxy_env is defined
|
||||
|
||||
- name: Deploy vip manifest
|
||||
include_tasks: vip.yml
|
||||
|
||||
ansible.builtin.include_tasks: vip.yml
|
||||
- name: Deploy metallb manifest
|
||||
include_tasks: metallb.yml
|
||||
ansible.builtin.include_tasks: metallb.yml
|
||||
tags: metallb
|
||||
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
|
||||
|
||||
- name: Deploy kube-vip manifest
|
||||
ansible.builtin.include_tasks: kube-vip.yml
|
||||
tags: kubevip
|
||||
when: kube_vip_lb_ip_range is defined
|
||||
|
||||
- name: Init cluster inside the transient k3s-init service
|
||||
command:
|
||||
cmd: "systemd-run -p RestartSec=2 \
|
||||
-p Restart=on-failure \
|
||||
--unit=k3s-init \
|
||||
k3s server {{ server_init_args }}"
|
||||
ansible.builtin.command:
|
||||
cmd: systemd-run -p RestartSec=2 -p Restart=on-failure --unit=k3s-init k3s server {{ server_init_args }}
|
||||
creates: "{{ systemd_dir }}/k3s-init.service"
|
||||
|
||||
- name: Verification
|
||||
when: not ansible_check_mode
|
||||
block:
|
||||
- name: Verify that all nodes actually joined (check k3s-init.service if this fails)
|
||||
command:
|
||||
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
|
||||
ansible.builtin.command:
|
||||
cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" # yamllint disable-line rule:line-length
|
||||
register: nodes
|
||||
      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length)  # yamllint disable-line rule:line-length
      retries: "{{ retry_count | default(20) }}"
      delay: 10
      changed_when: false
  always:
    - name: Save logs of k3s-init.service
      include_tasks: fetch_k3s_init_logs.yml
      ansible.builtin.include_tasks: fetch_k3s_init_logs.yml
      when: log_destination
      vars:
        log_destination: >-
          {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
    - name: Kill the temporary service used for initialization
      systemd:
      ansible.builtin.systemd:
        name: k3s-init
        state: stopped
      failed_when: false

- name: Copy K3s service file
  register: k3s_service
  template:
    src: "k3s.service.j2"
  ansible.builtin.template:
    src: k3s.service.j2
    dest: "{{ systemd_dir }}/k3s.service"
    owner: root
    group: root
    mode: 0644
    mode: "0644"

- name: Enable and check K3s service
  systemd:
  ansible.builtin.systemd:
    name: k3s
    daemon_reload: true
    state: restarted
    enabled: true

- name: Wait for node-token
  wait_for:
  ansible.builtin.wait_for:
    path: /var/lib/rancher/k3s/server/node-token

- name: Register node-token file access mode
  stat:
  ansible.builtin.stat:
    path: /var/lib/rancher/k3s/server
  register: p

- name: Change file access node-token
  file:
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server
    mode: "g+rx,o+rx"
    mode: g+rx,o+rx

- name: Read node-token from master
  slurp:
  ansible.builtin.slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: node_token

- name: Store Master node-token
  set_fact:
  ansible.builtin.set_fact:
    token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"

- name: Restore node-token file access
  file:
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server
    mode: "{{ p.stat.mode }}"

- name: Create directory .kube
  file:
  ansible.builtin.file:
    path: "{{ ansible_user_dir }}/.kube"
    state: directory
    owner: "{{ ansible_user_id }}"
    mode: "u=rwx,g=rx,o="
    mode: u=rwx,g=rx,o=

- name: Copy config file to user home directory
  copy:
  ansible.builtin.copy:
    src: /etc/rancher/k3s/k3s.yaml
    dest: "{{ ansible_user_dir }}/.kube/config"
    remote_src: true
    owner: "{{ ansible_user_id }}"
    mode: "u=rw,g=,o="
    mode: u=rw,g=,o=

- name: Configure kubectl cluster to {{ endpoint_url }}
  command: >-
    k3s kubectl config set-cluster default
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} config set-cluster default
    --server={{ endpoint_url }}
    --kubeconfig {{ ansible_user_dir }}/.kube/config
  changed_when: true
  vars:
    endpoint_url: >-
      https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
  # Deactivated linter rules:
  #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
  #     would be undefined. This will not be the case during playbook execution.
  # noqa jinja[invalid]

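For context, ansible.utils.ipwrap only changes IPv6 values (it wraps them in brackets), which keeps the rendered endpoint_url valid in both address families. A minimal sketch with illustrative addresses:

# apiserver_endpoint: 192.168.30.222  ->  https://192.168.30.222:6443
# apiserver_endpoint: 2001:db8::222   ->  https://[2001:db8::222]:6443
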
- name: Create kubectl symlink
  file:
  ansible.builtin.file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/kubectl
    state: link
  when: k3s_create_kubectl_symlink | default(true) | bool

- name: Create crictl symlink
  file:
  ansible.builtin.file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/crictl
    state: link
  when: k3s_create_crictl_symlink | default(true) | bool

- name: Get contents of manifests folder
  find:
  ansible.builtin.find:
    paths: /var/lib/rancher/k3s/server/manifests
    file_type: file
  register: k3s_server_manifests

- name: Get sub dirs of manifests folder
  find:
  ansible.builtin.find:
    paths: /var/lib/rancher/k3s/server/manifests
    file_type: directory
  register: k3s_server_manifests_directories

- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
  file:
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: absent
  with_items:

@@ -1,30 +1,30 @@
---
- name: Create manifests directory on first master
  file:
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: 0644
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml"  # noqa yaml[line-length]
    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
    url: https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml  # noqa yaml[line-length]
    dest: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
    owner: root
    group: root
    mode: 0644
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
  ansible.builtin.replace:
    path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
    path: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
    replace: "{{ item.to }}"
  with_items:
    - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
      to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
    - change: metallb/speaker:{{ metal_lb_controller_tag_version }}
      to: metallb/speaker:{{ metal_lb_speaker_tag_version }}
  loop_control:
    label: "{{ item.change }} => {{ item.to }}"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

@@ -1,27 +1,27 @@
---
- name: Create manifests directory on first master
  file:
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: 0644
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Download vip rbac manifest to first master
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
    url: https://kube-vip.io/manifests/rbac.yaml
    dest: /var/lib/rancher/k3s/server/manifests/vip-rbac.yaml
    owner: root
    group: root
    mode: 0644
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Copy vip manifest to first master
  template:
    src: "vip.yaml.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
  ansible.builtin.template:
    src: vip.yaml.j2
    dest: /var/lib/rancher/k3s/server/manifests/vip.yaml
    owner: root
    group: root
    mode: 0644
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

roles/k3s_server/templates/kubevip.yaml.j2 (new file, 13 lines)
@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
  range-global: {{ kube_vip_lb_ip_range | join(',') }}
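Both value shapes therefore render the same ConfigMap key. A hedged inventory sketch (ranges illustrative):

kube_vip_lb_ip_range: 192.168.30.80-192.168.30.90   # legacy single string
kube_vip_lb_ip_range:                               # list form
  - 192.168.30.80-192.168.30.85
  - 192.168.30.86-192.168.30.90

Either form ends up as a comma-joined range-global value in the kubevip ConfigMap.
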
@@ -27,7 +27,9 @@ spec:
        - manager
        env:
        - name: vip_arp
          value: "true"
          value: "{{ 'true' if kube_vip_arp | default(true) | bool else 'false' }}"
        - name: bgp_enable
          value: "{{ 'true' if kube_vip_bgp | default(false) | bool else 'false' }}"
        - name: port
          value: "6443"
{% if kube_vip_iface %}
@@ -43,7 +45,7 @@ spec:
        - name: vip_ddns
          value: "false"
        - name: svc_enable
          value: "false"
          value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}"
        - name: vip_leaderelection
          value: "true"
        - name: vip_leaseduration
@@ -54,6 +56,24 @@ spec:
          value: "2"
        - name: address
          value: {{ apiserver_endpoint }}
{% if kube_vip_bgp | default(false) | bool %}
{% if kube_vip_bgp_routerid is defined %}
        - name: bgp_routerid
          value: "{{ kube_vip_bgp_routerid }}"
{% endif %}
{% if kube_vip_bgp_as is defined %}
        - name: bgp_as
          value: "{{ kube_vip_bgp_as }}"
{% endif %}
{% if kube_vip_bgp_peeraddress is defined %}
        - name: bgp_peeraddress
          value: "{{ kube_vip_bgp_peeraddress }}"
{% endif %}
{% if kube_vip_bgp_peeras is defined %}
        - name: bgp_peeras
          value: "{{ kube_vip_bgp_peeras }}"
{% endif %}
{% endif %}
        image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }}
        imagePullPolicy: Always
        name: kube-vip

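These guards make BGP peering opt-in per inventory: each env var is only emitted when its variable is defined. A minimal sketch of group vars that would populate them (ASNs and addresses illustrative):

kube_vip_bgp: true
kube_vip_bgp_routerid: 192.168.30.21
kube_vip_bgp_as: 64513
kube_vip_bgp_peeraddress: 192.168.30.1
kube_vip_bgp_peeras: 64512
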
@@ -1,6 +1,32 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 240s
k3s_kubectl_binary: k3s kubectl

# Name of the master group
bpf_lb_algorithm: maglev
bpf_lb_mode: hybrid

calico_blockSize: 26  # noqa var-naming
calico_ebpf: false
calico_encapsulation: VXLANCrossSubnet
calico_natOutgoing: Enabled  # noqa var-naming
calico_nodeSelector: all()  # noqa var-naming
calico_tag: v3.27.2

cilium_bgp: false
cilium_exportPodCIDR: true  # noqa var-naming
cilium_bgp_my_asn: 64513
cilium_bgp_peer_asn: 64512
cilium_bgp_neighbors: []
cilium_bgp_neighbors_groups: ['k3s_all']
cilium_bgp_lb_cidr: 192.168.31.0/24
cilium_hubble: true
cilium_mode: native

cluster_cidr: 10.52.0.0/16
enable_bpf_masquerade: true
kube_proxy_replacement: true
group_name_master: master

metal_lb_mode: layer2
metal_lb_available_timeout: 240s
metal_lb_controller_tag_version: v0.14.3
metal_lb_ip_range: 192.168.30.80-192.168.30.90

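All of these are role defaults, so they can be overridden from the inventory in the usual way; a hedged group_vars/all.yml sketch (values illustrative):

cluster_cidr: 10.42.0.0/16
metal_lb_ip_range: 192.168.50.100-192.168.50.120
metal_lb_controller_tag_version: v0.14.3
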
roles/k3s_server_post/meta/main.yml (new file, 153 lines)
@@ -0,0 +1,153 @@
---
argument_specs:
  main:
    short_description: Configure k3s cluster
    options:
      apiserver_endpoint:
        description: Virtual IP address configured on each master
        required: true

      bpf_lb_algorithm:
        description: BPF lb algorithm
        default: maglev

      bpf_lb_mode:
        description: BPF lb mode
        default: hybrid

      calico_blockSize:
        description: IP pool block size
        type: int
        default: 26

      calico_ebpf:
        description: Use eBPF dataplane instead of iptables
        type: bool
        default: false

      calico_encapsulation:
        description: IP pool encapsulation
        default: VXLANCrossSubnet

      calico_natOutgoing:
        description: IP pool NAT outgoing
        default: Enabled

      calico_nodeSelector:
        description: IP pool node selector
        default: all()

      calico_iface:
        description: The network interface used when Calico is enabled
        default: ~

      calico_tag:
        description: Calico version tag
        default: v3.27.2

      cilium_bgp:
        description:
          - Enable cilium BGP control plane for LB services and pod cidrs.
          - Disables the use of MetalLB.
        type: bool
        default: false

      cilium_bgp_my_asn:
        description: Local ASN for BGP peer
        type: int
        default: 64513

      cilium_bgp_peer_asn:
        description: BGP peer ASN
        type: int
        default: 64512

      cilium_bgp_peer_address:
        description: BGP peer address
        default: ~

      cilium_bgp_neighbors:
        description: List of BGP peer ASN & address pairs
        default: []

      cilium_bgp_neighbors_groups:
        description: Inventory groups in which to search for additional cilium_bgp_neighbors parameters to merge.
        default: ['k3s_all']

      cilium_bgp_lb_cidr:
        description: BGP load balancer IP range
        default: 192.168.31.0/24

      cilium_exportPodCIDR:
        description: Export pod CIDR
        type: bool
        default: true

      cilium_hubble:
        description: Enable Cilium Hubble
        type: bool
        default: true

      cilium_iface:
        description: The network interface used when Cilium is enabled
        default: ~

      cilium_mode:
        description: Inter-node communication mode
        default: native
        choices:
          - native
          - routed

      cluster_cidr:
        description: Internal cluster IP range
        default: 10.52.0.0/16

      enable_bpf_masquerade:
        description: Use IP masquerading
        type: bool
        default: true

      group_name_master:
        description: Name of the master group
        default: master

      kube_proxy_replacement:
        description: Replace the native kube-proxy with Cilium
        type: bool
        default: true

      kube_vip_lb_ip_range:
        description: IP range for kube-vip load balancer
        default: ~

      metal_lb_available_timeout:
        description: Wait time for MetalLB resources
        default: 240s

      metal_lb_ip_range:
        description: MetalLB IP range for load balancer
        default: 192.168.30.80-192.168.30.90

      metal_lb_controller_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_mode:
        description: MetalLB mode
        default: layer2
        choices:
          - bgp
          - layer2

      metal_lb_bgp_my_asn:
        description: BGP ASN configuration
        default: ~

      metal_lb_bgp_peer_asn:
        description: BGP peer ASN configuration
        default: ~

      metal_lb_bgp_peer_address:
        description: BGP peer address
        default: ~
roles/k3s_server_post/tasks/calico.yml (new file, 120 lines)
@@ -0,0 +1,120 @@
---
- name: Deploy Calico to cluster
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
  run_once: true
  block:
    - name: Create manifests directory on first master
      ansible.builtin.file:
        path: /tmp/k3s
        state: directory
        owner: root
        group: root
        mode: "0755"

    - name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
      ansible.builtin.get_url:
        url: https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml
        dest: /tmp/k3s/tigera-operator.yaml
        owner: root
        group: root
        mode: "0755"

    - name: Copy Calico custom resources manifest to first master
      ansible.builtin.template:
        src: calico.crs.j2
        dest: /tmp/k3s/custom-resources.yaml
        owner: root
        group: root
        mode: "0755"

    - name: Deploy or replace Tigera Operator
      block:
        - name: Deploy Tigera Operator
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/tigera-operator.yaml"
          register: create_operator
          changed_when: "'created' in create_operator.stdout"
          failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
      rescue:
        - name: Replace existing Tigera Operator
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} replace -f /tmp/k3s/tigera-operator.yaml"
          register: replace_operator
          changed_when: "'replaced' in replace_operator.stdout"
          failed_when: "'Error' in replace_operator.stderr"

    - name: Wait for Tigera Operator resources
      ansible.builtin.command: >-
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace='tigera-operator'
        --for=condition=Available=True
        --timeout=30s
      register: tigera_result
      changed_when: false
      until: tigera_result is succeeded
      retries: 7
      delay: 7
      with_items:
        - { name: tigera-operator, type: deployment }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

    - name: Deploy Calico custom resources
      block:
        - name: Deploy custom resources for Calico
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/custom-resources.yaml"
          register: create_cr
          changed_when: "'created' in create_cr.stdout"
          failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
      rescue:
        - name: Apply new Calico custom resource manifest
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/custom-resources.yaml"
          register: apply_cr
          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
          failed_when: "'Error' in apply_cr.stderr"

    - name: Wait for Calico system resources to be available
      ansible.builtin.command: >-
        {% if item.type == 'daemonset' %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods
        --namespace='{{ item.namespace }}'
        --selector={{ item.selector }}
        --for=condition=Ready
        {% else %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace='{{ item.namespace }}'
        --for=condition=Available
        {% endif %}
        --timeout=30s
      register: cr_result
      changed_when: false
      until: cr_result is succeeded
      retries: 30
      delay: 7
      with_items:
        - { name: calico-typha, type: deployment, namespace: calico-system }
        - { name: calico-kube-controllers, type: deployment, namespace: calico-system }
        - name: csi-node-driver
          type: daemonset
          selector: k8s-app=csi-node-driver
          namespace: calico-system
        - name: calico-node
          type: daemonset
          selector: k8s-app=calico-node
          namespace: calico-system
        - { name: calico-apiserver, type: deployment, namespace: calico-apiserver }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

    - name: Patch Felix configuration for eBPF mode
      ansible.builtin.command:
        cmd: >
          {{ k3s_kubectl_binary | default('k3s kubectl') }} patch felixconfiguration default
          --type='merge'
          --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
      register: patch_result
      changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
      failed_when: "'Error' in patch_result.stderr"
      when: calico_ebpf
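This Calico path only runs when calico_iface is set (see the gate in tasks/main.yml further down in this diff). A hedged group_vars sketch that would select it, with the eBPF dataplane enabled (interface name illustrative):

calico_iface: eth0
calico_ebpf: true
calico_tag: v3.27.2
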
roles/k3s_server_post/tasks/cilium.yml (new file, 257 lines)
@@ -0,0 +1,257 @@
---
- name: Prepare Cilium CLI on first master and deploy CNI
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
  run_once: true
  block:
    - name: Create tmp directory on first master
      ansible.builtin.file:
        path: /tmp/k3s
        state: directory
        owner: root
        group: root
        mode: "0755"

    - name: Check if Cilium CLI is installed
      ansible.builtin.command: cilium version
      register: cilium_cli_installed
      failed_when: false
      changed_when: false
      ignore_errors: true

    - name: Check for Cilium CLI version in command output
      ansible.builtin.set_fact:
        installed_cli_version: >-
          {{
            cilium_cli_installed.stdout_lines
            | join(' ')
            | regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
            | first
            | default('unknown')
          }}
      when: cilium_cli_installed.rc == 0

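The regex keys off the line `cilium version` prints for the CLI itself; a hedged sketch of the match (version string illustrative):

# $ cilium version
# cilium-cli: v0.15.20 compiled with go1.21.5 on linux/amd64
# regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')  ->  ['v0.15.20']
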
    - name: Get latest stable Cilium CLI version file
      ansible.builtin.get_url:
        url: https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt
        dest: /tmp/k3s/cilium-cli-stable.txt
        owner: root
        group: root
        mode: "0755"

    - name: Read Cilium CLI stable version from file
      ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
      register: cli_ver
      changed_when: false

    - name: Log installed Cilium CLI version
      ansible.builtin.debug:
        msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"

    - name: Log latest stable Cilium CLI version
      ansible.builtin.debug:
        msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"

    - name: Determine if Cilium CLI needs installation or update
      ansible.builtin.set_fact:
        cilium_cli_needs_update: >-
          {{
            cilium_cli_installed.rc != 0 or
            (cilium_cli_installed.rc == 0 and
            installed_cli_version != cli_ver.stdout)
          }}

    - name: Install or update Cilium CLI
      when: cilium_cli_needs_update
      block:
        - name: Set architecture variable
          ansible.builtin.set_fact:
            cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"

        - name: Download Cilium CLI and checksum
          ansible.builtin.get_url:
            url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
            dest: /tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}
            owner: root
            group: root
            mode: "0755"
          loop:
            - .tar.gz
            - .tar.gz.sha256sum
          vars:
            cilium_base_url: https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}

        - name: Verify the downloaded tarball
          ansible.builtin.shell: |
            cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
          args:
            executable: /bin/bash
          changed_when: false

        - name: Extract Cilium CLI to /usr/local/bin
          ansible.builtin.unarchive:
            src: /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
            dest: /usr/local/bin
            remote_src: true

        - name: Remove downloaded tarball and checksum file
          ansible.builtin.file:
            path: "{{ item }}"
            state: absent
          loop:
            - /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
            - /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum

    - name: Wait for connectivity to kube VIP
      ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
      register: ping_result
      until: ping_result.rc == 0
      retries: 21
      delay: 1
      ignore_errors: true
      changed_when: false

    - name: Fail if kube VIP not reachable
      ansible.builtin.fail:
        msg: API endpoint {{ apiserver_endpoint }} is not reachable
      when: ping_result.rc != 0

    - name: Test for existing Cilium install
      ansible.builtin.command: |
        {{ k3s_kubectl_binary | default('k3s kubectl') }} -n kube-system get daemonsets cilium
      register: cilium_installed
      failed_when: false
      changed_when: false
      ignore_errors: true

    - name: Check existing Cilium install
      when: cilium_installed.rc == 0
      block:
        - name: Check Cilium version
          ansible.builtin.command: cilium version
          register: cilium_version
          failed_when: false
          changed_when: false
          ignore_errors: true

        - name: Parse installed Cilium version
          ansible.builtin.set_fact:
            installed_cilium_version: >-
              {{
                cilium_version.stdout_lines
                | join(' ')
                | regex_findall('cilium image.+(\d+\.\d+\.\d+)')
                | first
                | default('unknown')
              }}

        - name: Determine if Cilium needs update
          ansible.builtin.set_fact:
            cilium_needs_update: >-
              {{ 'v' + installed_cilium_version != cilium_tag }}

        - name: Log result
          ansible.builtin.debug:
            msg: >
              Installed Cilium version: {{ installed_cilium_version }},
              Target Cilium version: {{ cilium_tag }},
              Update needed: {{ cilium_needs_update }}

    - name: Install Cilium
      ansible.builtin.command: >-
        {% if cilium_installed.rc != 0 %}
        cilium install
        {% else %}
        cilium upgrade
        {% endif %}
        --version "{{ cilium_tag }}"
        --helm-set operator.replicas="1"
        {{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
        --helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
        {% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
        --helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
        {% endif %}
        --helm-set k8sServiceHost="127.0.0.1"
        --helm-set k8sServicePort="6444"
        --helm-set routingMode={{ cilium_mode }}
        --helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
        --helm-set kubeProxyReplacement={{ kube_proxy_replacement }}
        --helm-set bpf.masquerade={{ enable_bpf_masquerade }}
        --helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
        --helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
        --helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
        --helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
        {% if kube_proxy_replacement is not false %}
        --helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm }}
        --helm-set bpf.loadBalancer.mode={{ bpf_lb_mode }}
        {% endif %}
      environment:
        KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
      register: cilium_install_result
      changed_when: cilium_install_result.rc == 0
      when: cilium_installed.rc != 0 or cilium_needs_update

    - name: Wait for Cilium resources
      ansible.builtin.command: >-
        {% if item.type == 'daemonset' %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods
        --namespace=kube-system
        --selector='k8s-app=cilium'
        --for=condition=Ready
        {% else %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace=kube-system
        --for=condition=Available
        {% endif %}
        --timeout=30s
      register: cr_result
      changed_when: false
      until: cr_result is succeeded
      retries: 30
      delay: 7
      with_items:
        - { name: cilium-operator, type: deployment }
        - { name: cilium, type: daemonset, selector: k8s-app=cilium }
        - { name: hubble-relay, type: deployment, check_hubble: true }
        - { name: hubble-ui, type: deployment, check_hubble: true }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"
      when: >-
        not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)

    - name: Configure Cilium BGP
      when: cilium_bgp
      block:
        - name: Set _cilium_bgp_neighbors fact
          ansible.builtin.set_fact:
            _cilium_bgp_neighbors: "{{ lookup('community.general.merge_variables', '^cilium_bgp_neighbors__.+$', initial_value=cilium_bgp_neighbors, groups=cilium_bgp_neighbors_groups) }}"  # yamllint disable-line rule:line-length
          when: cilium_bgp_neighbors | length > 0
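The community.general.merge_variables lookup collects every variable whose name matches the pattern across the listed inventory groups, so per-host peers can be added without editing the shared default list. A hedged host_vars sketch (names and addresses illustrative):

# host_vars/k3s-master-1.yml
cilium_bgp_neighbors__rack1:
  - peer_address: 192.168.30.1
    peer_asn: 64512
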
        - name: Copy BGP manifests to first master
          ansible.builtin.template:
            src: cilium.crs.j2
            dest: /tmp/k3s/cilium-bgp.yaml
            owner: root
            group: root
            mode: "0755"

        - name: Apply BGP manifests
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/cilium-bgp.yaml"
          register: apply_cr
          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
          failed_when: "'is invalid' in apply_cr.stderr"
          ignore_errors: true

        - name: Print error message if BGP manifests application fails
          ansible.builtin.debug:
            msg: "{{ apply_cr.stderr }}"
          when: "'is invalid' in apply_cr.stderr"

        - name: Test for BGP config resources
          ansible.builtin.command: "{{ item }}"
          loop:
            - "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumBGPPeeringPolicy.cilium.io"
            - "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumLoadBalancerIPPool.cilium.io"
          changed_when: false
          loop_control:
            label: "{{ item }}"
@@ -1,9 +1,20 @@
---
- name: Deploy calico
  ansible.builtin.include_tasks: calico.yml
  tags: calico
  when: calico_iface is defined and cilium_iface is not defined

- name: Deploy cilium
  ansible.builtin.include_tasks: cilium.yml
  tags: cilium
  when: cilium_iface is defined

- name: Deploy metallb pool
  include_tasks: metallb.yml
  ansible.builtin.include_tasks: metallb.yml
  tags: metallb
  when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)

- name: Remove tmp directory used for manifests
  file:
  ansible.builtin.file:
    path: /tmp/k3s
    state: absent

@@ -1,32 +1,53 @@
---
- name: Create manifests directory for temp configuration
  file:
  ansible.builtin.file:
    path: /tmp/k3s
    state: directory
    owner: "{{ ansible_user_id }}"
    mode: 0755
    mode: "0755"
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Delete outdated metallb replicas
  ansible.builtin.shell: |-
    set -o pipefail

    REPLICAS=$({{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' get replicasets \
      -l 'component=controller,app=metallb' \
      -o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
    REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
    if [ -n "${REPLICAS_SETS}" ] ; then
      for REPLICAS in "${REPLICAS_SETS}"
      do
        {{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' \
          delete rs "${REPLICAS}"
      done
    fi
  args:
    executable: /bin/bash
  changed_when: false
  run_once: true
  with_items: "{{ groups[group_name_master | default('master')] }}"

- name: Copy metallb CRs manifest to first master
  template:
    src: "metallb.crs.j2"
    dest: "/tmp/k3s/metallb-crs.yaml"
  ansible.builtin.template:
    src: metallb.crs.j2
    dest: /tmp/k3s/metallb-crs.yaml
    owner: "{{ ansible_user_id }}"
    mode: 0755
    mode: "0755"
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Test metallb-system namespace
  command: >-
    k3s kubectl -n metallb-system
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system
  changed_when: false
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Wait for MetalLB resources
  command: >-
    k3s kubectl wait {{ item.resource }}
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.resource }}
    --namespace='metallb-system'
    {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
    {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
@@ -62,16 +83,30 @@
  loop_control:
    label: "{{ item.description }}"

- name: Set metallb webhook service name
  ansible.builtin.set_fact:
    metallb_webhook_service_name: >-
      {{
        (
          (metal_lb_controller_tag_version | regex_replace('^v', ''))
          is
          version('0.14.4', '<', version_type='semver')
        ) | ternary(
          'webhook-service',
          'metallb-webhook-service'
        )
      }}

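The ternary tracks MetalLB's rename of its webhook Service at 0.14.4; a hedged sketch of how the expression resolves for two controller tags:

# metal_lb_controller_tag_version: v0.14.3  ->  metallb_webhook_service_name: webhook-service
# metal_lb_controller_tag_version: v0.14.4  ->  metallb_webhook_service_name: metallb-webhook-service
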
- name: Test metallb-system webhook-service endpoint
  command: >-
    k3s kubectl -n metallb-system get endpoints webhook-service
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get endpoints {{ metallb_webhook_service_name }}
  changed_when: false
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Apply metallb CRs
  command: >-
    k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/metallb-crs.yaml
    --timeout='{{ metal_lb_available_timeout }}'
  register: this
  changed_when: false
@@ -80,8 +115,8 @@
  retries: 5

- name: Test metallb-system resources for Layer 2 configuration
  command: >-
    k3s kubectl -n metallb-system get {{ item }}
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "layer2"
@@ -90,8 +125,8 @@
    - L2Advertisement

- name: Test metallb-system resources for BGP configuration
  command: >-
    k3s kubectl -n metallb-system get {{ item }}
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "bgp"

roles/k3s_server_post/templates/calico.crs.j2 (new file, 41 lines)
@@ -0,0 +1,41 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: {{ calico_blockSize }}
        cidr: {{ cluster_cidr }}
        encapsulation: {{ calico_encapsulation }}
        natOutgoing: {{ calico_natOutgoing }}
        nodeSelector: {{ calico_nodeSelector }}
    nodeAddressAutodetectionV4:
      interface: {{ calico_iface }}
    linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}

---

# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
  KUBERNETES_SERVICE_PORT: '6443'
{% endif %}
roles/k3s_server_post/templates/cilium.crs.j2 (new file, 48 lines)
@@ -0,0 +1,48 @@
apiVersion: "cilium.io/v2alpha1"
kind: CiliumBGPPeeringPolicy
metadata:
  name: 01-bgp-peering-policy
spec: # CiliumBGPPeeringPolicySpec
  virtualRouters: # []CiliumBGPVirtualRouter
    - localASN: {{ cilium_bgp_my_asn }}
      exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
      neighbors: # []CiliumBGPNeighbor
{% if _cilium_bgp_neighbors | length > 0 %}
{% for item in _cilium_bgp_neighbors %}
        - peerAddress: '{{ item.peer_address + "/32" }}'
          peerASN: {{ item.peer_asn }}
          eBGPMultihopTTL: 10
          connectRetryTimeSeconds: 120
          holdTimeSeconds: 90
          keepAliveTimeSeconds: 30
          gracefulRestart:
            enabled: true
            restartTimeSeconds: 120
{% endfor %}
{% else %}
        - peerAddress: '{{ cilium_bgp_peer_address + "/32" }}'
          peerASN: {{ cilium_bgp_peer_asn }}
          eBGPMultihopTTL: 10
          connectRetryTimeSeconds: 120
          holdTimeSeconds: 90
          keepAliveTimeSeconds: 30
          gracefulRestart:
            enabled: true
            restartTimeSeconds: 120
{% endif %}
      serviceSelector:
        matchExpressions:
          - {key: somekey, operator: NotIn, values: ['never-used-value']}
---
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
  name: "01-lb-pool"
spec:
  blocks:
{% if "/" in cilium_bgp_lb_cidr %}
    - cidr: {{ cilium_bgp_lb_cidr }}
{% else %}
    - start: {{ cilium_bgp_lb_cidr.split('-')[0] }}
      stop: {{ cilium_bgp_lb_cidr.split('-')[1] }}
{% endif %}
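The pool block accepts either notation for cilium_bgp_lb_cidr; a hedged sketch of both rendered results (ranges illustrative):

# cilium_bgp_lb_cidr: 192.168.31.0/24              ->  - cidr: 192.168.31.0/24
# cilium_bgp_lb_cidr: 192.168.31.10-192.168.31.50  ->  - start: 192.168.31.10
#                                                        stop: 192.168.31.50
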
@@ -1,5 +1,6 @@
---
- name: Reboot server
  become: true
  reboot:
  ansible.builtin.reboot:
    reboot_command: "{{ custom_reboot_command | default(omit) }}"
  listen: reboot server

roles/lxc/meta/main.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
argument_specs:
  main:
    short_description: Configure LXC
    options:
      custom_reboot_command:
        default: ~
@@ -1,20 +1,20 @@
---
- name: Check for rc.local file
  stat:
  ansible.builtin.stat:
    path: /etc/rc.local
  register: rcfile

- name: Create rc.local if needed
  lineinfile:
  ansible.builtin.lineinfile:
    path: /etc/rc.local
    line: "#!/bin/sh -e"
    create: true
    insertbefore: BOF
    mode: "u=rwx,g=rx,o=rx"
    mode: u=rwx,g=rx,o=rx
  when: not rcfile.stat.exists

- name: Write rc.local file
  blockinfile:
  ansible.builtin.blockinfile:
    path: /etc/rc.local
    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
    state: present

@@ -1,4 +1,4 @@
---
secure_path:
  RedHat: '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
  Suse: '/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin'
  RedHat: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
  Suse: /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin

roles/prereq/meta/main.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
argument_specs:
  main:
    short_description: Prerequisites
    options:
      system_timezone:
        description: Timezone to be set on all nodes
@@ -34,10 +34,10 @@
  tags: sysctl

- name: Add br_netfilter to /etc/modules-load.d/
  copy:
    content: "br_netfilter"
  ansible.builtin.copy:
    content: br_netfilter
    dest: /etc/modules-load.d/br_netfilter.conf
    mode: "u=rw,g=,o="
    mode: u=rw,g=,o=
  when: ansible_os_family == "RedHat"

- name: Load br_netfilter
@@ -59,11 +59,11 @@
  tags: sysctl

- name: Add /usr/local/bin to sudo secure_path
  lineinfile:
    line: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'
    regexp: "Defaults(\\s)*secure_path(\\s)*="
  ansible.builtin.lineinfile:
    line: Defaults secure_path = {{ secure_path[ansible_os_family] }}
    regexp: Defaults(\s)*secure_path(\s)*=
    state: present
    insertafter: EOF
    path: /etc/sudoers
    validate: 'visudo -cf %s'
    validate: visudo -cf %s
  when: ansible_os_family in [ "RedHat", "Suse" ]

@@ -2,12 +2,12 @@
- name: Reboot containers
  block:
    - name: Get container ids from filtered files
      set_fact:
      ansible.builtin.set_fact:
        proxmox_lxc_filtered_ids: >-
          {{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
      listen: reboot containers
    - name: Reboot container
      command: "pct reboot {{ item }}"
      ansible.builtin.command: pct reboot {{ item }}
      loop: "{{ proxmox_lxc_filtered_ids }}"
      changed_when: true
      listen: reboot containers

@@ -1,44 +1,43 @@
---
- name: Check for container files that exist on this host
  stat:
    path: "/etc/pve/lxc/{{ item }}.conf"
  ansible.builtin.stat:
    path: /etc/pve/lxc/{{ item }}.conf
  loop: "{{ proxmox_lxc_ct_ids }}"
  register: stat_results

- name: Filter out files that do not exist
  set_fact:
    proxmox_lxc_filtered_files:
      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
  ansible.builtin.set_fact:
    proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'  # noqa yaml[line-length]

# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
  lineinfile:
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.apparmor.profile"
    regexp: ^lxc.apparmor.profile
    line: "lxc.apparmor.profile: unconfined"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right cgroup
  lineinfile:
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.cgroup.devices.allow"
    regexp: ^lxc.cgroup.devices.allow
    line: "lxc.cgroup.devices.allow: a"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right cap drop
  lineinfile:
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.cap.drop"
    regexp: ^lxc.cap.drop
    line: "lxc.cap.drop: "
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right mounts
  lineinfile:
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.mount.auto"
    regexp: ^lxc.mount.auto
    line: 'lxc.mount.auto: "proc:rw sys:rw"'
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

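Taken together, these lineinfile edits leave each matched container config with entries along these lines (a hedged sketch; container id illustrative):

# /etc/pve/lxc/101.conf
lxc.apparmor.profile: unconfined
lxc.cgroup.devices.allow: a
lxc.cap.drop:
lxc.mount.auto: "proc:rw sys:rw"
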
@@ -1,4 +1,5 @@
---
- name: Reboot
  reboot:
  ansible.builtin.reboot:
    reboot_command: "{{ custom_reboot_command | default(omit) }}"
  listen: reboot

@@ -1,44 +1,51 @@
---
- name: Test for raspberry pi /proc/cpuinfo
  command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo
  ansible.builtin.command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo
  register: grep_cpuinfo_raspberrypi
  failed_when: false
  changed_when: false

- name: Test for raspberry pi /proc/device-tree/model
  command: grep -E "Raspberry Pi" /proc/device-tree/model
  ansible.builtin.command: grep -E "Raspberry Pi" /proc/device-tree/model
  register: grep_device_tree_model_raspberrypi
  failed_when: false
  changed_when: false

- name: Set raspberry_pi fact to true
  set_fact:
  ansible.builtin.set_fact:
    raspberry_pi: true
  when:
    grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
  when: grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0

- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm)
  set_fact:
  ansible.builtin.set_fact:
    detected_distribution: Raspbian
  vars:
    allowed_descriptions:
      - "[Rr]aspbian.*"
      - "Debian.*buster"
      - "Debian.*bullseye"
      - "Debian.*bookworm"
      - Debian.*buster
      - Debian.*bullseye
      - Debian.*bookworm
  when:
    - ansible_facts.architecture is search("aarch64")
    - raspberry_pi|default(false)
    - ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))

- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm)
  ansible.builtin.set_fact:
    detected_distribution: Raspbian
  when:
    - ansible_facts.architecture is search("aarch64")
    - raspberry_pi|default(false)
    - ansible_facts.lsb.description|default("") is match("Debian.*bookworm")

- name: Set detected_distribution_major_version
  set_fact:
  ansible.builtin.set_fact:
    detected_distribution_major_version: "{{ ansible_facts.lsb.major_release }}"
  when:
    - detected_distribution | default("") == "Raspbian"

- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
  include_tasks: "{{ item }}"
  ansible.builtin.include_tasks: "{{ item }}"
  with_first_found:
    - "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
    - "{{ action_ }}/{{ detected_distribution }}.yml"

@@ -1,19 +1,39 @@
---
- name: Test for cmdline path
  ansible.builtin.stat:
    path: /boot/firmware/cmdline.txt
  register: boot_cmdline_path
  failed_when: false
  changed_when: false

- name: Set cmdline path based on Debian version and command result
  ansible.builtin.set_fact:
    cmdline_path: >-
      {{
        (
          boot_cmdline_path.stat.exists and
          ansible_facts.lsb.description | default('') is match('Debian.*(?!(bookworm|sid))')
        ) | ternary(
          '/boot/firmware/cmdline.txt',
          '/boot/cmdline.txt'
        )
      }}

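In effect the fact points at the firmware path only when both conditions hold; a hedged sketch of the two ternary outcomes:

# boot_cmdline_path.stat.exists and the lsb description matches  ->  cmdline_path: /boot/firmware/cmdline.txt
# otherwise                                                      ->  cmdline_path: /boot/cmdline.txt
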
- name: Activating cgroup support
  lineinfile:
    path: /boot/cmdline.txt
    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
  ansible.builtin.lineinfile:
    path: "{{ cmdline_path }}"
    regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
    line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
    backrefs: true
  notify: reboot

- name: Install iptables
  apt:
  ansible.builtin.apt:
    name: iptables
    state: present

- name: Flush iptables before changing to iptables-legacy
  iptables:
  ansible.builtin.iptables:
    flush: true

- name: Changing to iptables-legacy

@@ -1,9 +1,9 @@
---
- name: Enable cgroup via boot commandline if not already enabled for Rocky
  lineinfile:
  ansible.builtin.lineinfile:
    path: /boot/cmdline.txt
    backrefs: true
    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
    regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
    line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
  notify: reboot
  when: not ansible_check_mode

@@ -1,13 +1,14 @@
---
- name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi
  lineinfile:
  ansible.builtin.lineinfile:
    path: /boot/firmware/cmdline.txt
    backrefs: true
    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
    regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
    line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
  notify: reboot

- name: Install linux-modules-extra-raspi
  apt:
  ansible.builtin.apt:
    name: linux-modules-extra-raspi
    state: present
  when: ansible_distribution_version is version('24.04', '<')

@@ -1,5 +1,6 @@
---
- name: Remove linux-modules-extra-raspi
  apt:
  ansible.builtin.apt:
    name: linux-modules-extra-raspi
    state: absent
  when: ansible_distribution_version is version('24.04', '<')

roles/reset/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
systemd_dir: /etc/systemd/system
roles/reset/meta/main.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
argument_specs:
  main:
    short_description: Reset all nodes
    options:
      systemd_dir:
        description: Path to systemd services
        default: /etc/systemd/system
@@ -1,6 +1,6 @@
---
- name: Disable services
  systemd:
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: stopped
    enabled: false
@@ -12,12 +12,12 @@

- name: RUN pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
  register: pkill_containerd_shim_runc
  command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
  changed_when: "pkill_containerd_shim_runc.rc == 0"
  ansible.builtin.command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
  changed_when: pkill_containerd_shim_runc.rc == 0
  failed_when: false

- name: Umount k3s filesystems
  include_tasks: umount_with_children.yml
  ansible.builtin.include_tasks: umount_with_children.yml
  with_items:
    - /run/k3s
    - /var/lib/kubelet
@@ -30,7 +30,7 @@
    loop_var: mounted_fs

- name: Remove service files, binaries and data
  file:
  ansible.builtin.file:
    name: "{{ item }}"
    state: absent
  with_items:
@@ -45,9 +45,10 @@
    - /var/lib/rancher/k3s
    - /var/lib/rancher/
    - /var/lib/cni/
    - /etc/cni/net.d

- name: Remove K3s http_proxy files
  file:
  ansible.builtin.file:
    name: "{{ item }}"
    state: absent
  with_items:
@@ -58,22 +59,22 @@
  when: proxy_env is defined

- name: Reload daemon_reload
  systemd:
  ansible.builtin.systemd:
    daemon_reload: true

- name: Remove tmp directory used for manifests
  file:
  ansible.builtin.file:
    path: /tmp/k3s
    state: absent

- name: Check if rc.local exists
  stat:
  ansible.builtin.stat:
    path: /etc/rc.local
  register: rcfile

- name: Remove rc.local modifications for proxmox lxc containers
  become: true
  blockinfile:
  ansible.builtin.blockinfile:
    path: /etc/rc.local
    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
    create: false
@@ -82,14 +83,14 @@

- name: Check rc.local for cleanup
  become: true
  slurp:
  ansible.builtin.slurp:
    src: /etc/rc.local
  register: rcslurp
  when: proxmox_lxc_configure and rcfile.stat.exists

- name: Cleanup rc.local if we only have a Shebang line
  become: true
  file:
  ansible.builtin.file:
    path: /etc/rc.local
    state: absent
  when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1

@@ -1,6 +1,6 @@
---
- name: Get the list of mounted filesystems
  shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}"
  ansible.builtin.shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}"
  register: get_mounted_filesystems
  args:
    executable: /bin/bash
@@ -12,5 +12,4 @@
  ansible.posix.mount:
    path: "{{ item }}"
    state: unmounted
  with_items:
    "{{ get_mounted_filesystems.stdout_lines | reverse | list }}"
  with_items: "{{ get_mounted_filesystems.stdout_lines | reverse | list }}"

@@ -1,46 +1,45 @@
---
- name: Check for container files that exist on this host
  stat:
    path: "/etc/pve/lxc/{{ item }}.conf"
  ansible.builtin.stat:
    path: /etc/pve/lxc/{{ item }}.conf
  loop: "{{ proxmox_lxc_ct_ids }}"
  register: stat_results

- name: Filter out files that do not exist
  set_fact:
    proxmox_lxc_filtered_files:
      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
  ansible.builtin.set_fact:
    proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'  # noqa yaml[line-length]

- name: Remove LXC apparmor profile
  lineinfile:
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.apparmor.profile"
    regexp: ^lxc.apparmor.profile
    line: "lxc.apparmor.profile: unconfined"
    state: absent
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Remove lxc cgroups
  lineinfile:
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.cgroup.devices.allow"
    regexp: ^lxc.cgroup.devices.allow
    line: "lxc.cgroup.devices.allow: a"
    state: absent
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Remove lxc cap drop
  lineinfile:
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.cap.drop"
    regexp: ^lxc.cap.drop
    line: "lxc.cap.drop: "
    state: absent
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Remove lxc mounts
  lineinfile:
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.mount.auto"
    regexp: ^lxc.mount.auto
    line: 'lxc.mount.auto: "proc:rw sys:rw"'
    state: absent
  loop: "{{ proxmox_lxc_filtered_files }}"

site.yml (20 lines changed)
@@ -1,4 +1,13 @@
---
- name: Pre tasks
  hosts: all
  pre_tasks:
    - name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible)
      ansible.builtin.assert:
        that: ansible_version.full is version_compare('2.11', '>=')
        msg: >
          "Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/"

- name: Prepare Proxmox cluster
  hosts: proxmox
  gather_facts: true
@@ -46,3 +55,14 @@
  roles:
    - role: k3s_server_post
      become: true

- name: Storing kubeconfig in the playbook directory
  hosts: master
  environment: "{{ proxy_env | default({}) }}"
  tasks:
    - name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
      ansible.builtin.fetch:
        src: "{{ ansible_user_dir }}/.kube/config"
        dest: ./kubeconfig
        flat: true
      when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

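Once the play finishes, the fetched file sits next to the playbook and can be used directly; a minimal usage sketch:

kubectl --kubeconfig ./kubeconfig get nodes
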