mirror of https://github.com/techno-tim/k3s-ansible.git
synced 2025-12-25 18:23:05 +01:00

Compare commits: v1.24.6+k3s1 ... v1.29.2+k3s1
141 commits (SHA1):

a46d97a28d dc9d571f17 6742551e5c fb3478a086 518c5bb62a 3f5d8dfe9f efbfadcb93 f81ec04ba2
8432d3bc66 14ae9df1bc f175716339 955c6f6b4a 3b74985767 9ace193ade 83a0be3afd 029eba6102
0c8253b3a5 326b71dfa2 b95d6dd2cc e4146b4ca9 1fb10faf7f ea3b3c776a 5beca87783 6ffc25dfe5
bcd37a6904 8dd3ffc825 f6ba208b5c a22d8f7aaf 05fb6b566d 3aeb7d69ea 61bf3971ef 3f06a11c8d
3888a29bb1 98ef696f31 de26a79a4c ab7ca9b551 c5f71c9e2e 0f23e7e258 121061d875 db53f595fd
7b6b24ce4d a5728da35e cda7c92203 d910b83bf3 101313f880 12be355867 aa09e3e9df 511c410451
df9c6f3014 5ae8fd1223 e2e9881f0f edf0c9eebd 7669fd4721 cddbfc8e40 70e658cf98 7badfbd7bd
e880f08d26 95b2836dfc 505c2eeff2 9b6d551dd6 a64e882fb7 38e773315b 70ddf7b63c fb3128a783
2e318e0862 0607eb8aa4 a9904d1562 9707bc8a58 e635bd2626 1aabb5a927 215690b55b bd44a9b126
8d61fe81e5 c0ff304f22 83077ecdd1 33ae0d4970 edd4838407 5c79ea9b71 3d204ad851 13bd868faa
c564a8562a 0d6d43e7ca c0952288c2 1c9796e98b 288c4089e0 49f0a2ce6b 6c4621bd56 3e16ab6809
83fe50797c 2db0b3024c 6b2af77e74 d1d1bc3d91 3a1a7a19aa 030eeb4b75 4aeeb124ef 511c020bec
c47da38b53 6448948e9f 7bc198ab26 65bbc8e2ac dc2976e7f6 5a7ba98968 10c6ef1d57 ed4d888e3d
49d6d484ae 96c49c864e 60adb1de42 e023808f2f 511ec493d6 be3e72e173 e33cbe52c1 c06af919f3
b86384c439 bf2bd1edc5 e98e3ee77c 78f7a60378 e64fea760d 764e32c778 e6cf14ea78 da049dcc28
2604caa483 82d820805f da72884a5b 17a74b66c8 88d679ecb6 6bf3bcce92 cff815a031 f892029fcf
6b37ba5e60 b1fee44403 a1c7175bd1 69d3bdcd88 5268ef305a a840571733 b1370406ea 12d57a07d0
4f3b8ec9e0 45ddd65e74 b2a62ea4eb a8697edc99 d3218f5d5c
.ansible-lint
@@ -13,5 +13,9 @@ exclude_paths:
   - 'molecule/**/prepare.yml'
   - 'molecule/**/reset.yml'

+# The file was generated by galaxy ansible - don't mess with it.
+  - 'galaxy.yml'
+
 skip_list:
   - 'fqcn-builtins'
+  - var-naming[no-role-prefix]
.github/ISSUE_TEMPLATE.md (vendored, 8 changed lines)
@@ -37,6 +37,11 @@ systemd_dir: ""

 flannel_iface: ""

+#calico_iface: ""
+calico_ebpf: ""
+calico_cidr: ""
+calico_tag: ""
+
 apiserver_endpoint: ""

 k3s_token: "NA"
@@ -46,6 +51,9 @@ extra_agent_args: ""

 kube_vip_tag_version: ""

+kube_vip_cloud_provider_tag_version: ""
+kube_vip_lb_ip_range: ""
+
 metal_lb_speaker_tag_version: ""
 metal_lb_controller_tag_version: ""

.github/PULL_REQUEST_TEMPLATE.md (vendored, 1 changed line)
@@ -11,4 +11,5 @@
 - [ ] Ran `site.yml` playbook
 - [ ] Ran `reset.yml` playbook
 - [ ] Did not add any unnecessary changes
+- [ ] Ran pre-commit install at least once before committing
 - [ ] 🚀
.github/dependabot.yml (vendored, 15 changed lines)
@@ -9,3 +9,18 @@ updates:
    ignore:
      - dependency-name: "*"
        update-types: ["version-update:semver-major"]
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    rebase-strategy: "auto"
+
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    rebase-strategy: "auto"
+    ignore:
+      - dependency-name: "*"
+        update-types: ["version-update:semver-major"]
.github/workflows/cache.yml (vendored, new file, 42 lines)
---
name: "Cache"
on:
  workflow_call:
jobs:
  molecule:
    name: cache
    runs-on: self-hosted
    env:
      PYTHON_VERSION: "3.11"

    steps:
      - name: Check out the codebase
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip' # caching pip dependencies

      - name: Cache Vagrant boxes
        id: cache-vagrant
        uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
        with:
          lookup-only: true # if it exists, we don't need to restore and can skip the next step
          path: |
            ~/.vagrant.d/boxes
          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
          restore-keys: |
            vagrant-boxes

      - name: Download Vagrant boxes for all scenarios
        # To save some cache space, all scenarios share the same cache key.
        # On the other hand, this means that the cache contents should be
        # the same across all scenarios. This step ensures that.
        if: steps.cache-vagrant.outputs.cache-hit != 'true' # only run if false since this is just a cache step
        run: |
          ./.github/download-boxes.sh
          vagrant box list
.github/workflows/ci.yml (vendored, new file, 27 lines)
---
name: "CI"
on:
  pull_request:
    types:
      - opened
      - synchronize
    paths-ignore:
      - '**/.gitignore'
      - '**/FUNDING.yml'
      - '**/host.ini'
      - '**/*.md'
      - '**/.editorconfig'
      - '**/ansible.example.cfg'
      - '**/deploy.sh'
      - '**/LICENSE'
      - '**/reboot.sh'
      - '**/reset.sh'
jobs:
  pre:
    uses: ./.github/workflows/cache.yml
  lint:
    uses: ./.github/workflows/lint.yml
    needs: [pre]
  test:
    uses: ./.github/workflows/test.yml
    needs: [pre, lint]
.github/workflows/lint.yml (vendored, 55 changed lines)
@@ -1,24 +1,31 @@
 ---
 name: Linting
 on:
-  pull_request:
-  push:
-    branches:
-      - master
+  workflow_call:

 jobs:
-  ansible-lint:
-    name: YAML Lint + Ansible Lint
-    runs-on: ubuntu-latest
+  pre-commit-ci:
+    name: Pre-Commit
+    runs-on: self-hosted
+    env:
+      PYTHON_VERSION: "3.11"
+
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
-
-      - name: Set up Python 3.x
-        uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 #4.0.2
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
         with:
-          python-version: "3.x"
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Set up Python ${{ env.PYTHON_VERSION }}
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+          cache: 'pip' # caching pip dependencies
+
+      - name: Restore Ansible cache
+        uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
+        with:
+          path: ~/.ansible/collections
+          key: ansible-${{ hashFiles('collections/requirements.yml') }}

       - name: Install dependencies
         run: |
@@ -30,12 +37,18 @@ jobs:
           python3 -m pip install -r requirements.txt
           echo "::endgroup::"

-          echo "::group::Install Ansible role requirements from collections/requirements.yml"
-          ansible-galaxy install -r collections/requirements.yml
-          echo "::endgroup::"
-
-      - name: Run yamllint
-        run: yamllint .
-
-      - name: Run ansible-lint
-        run: ansible-lint
+      - name: Run pre-commit
+        uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # 3.0.1
+
+  ensure-pinned-actions:
+    name: Ensure SHA Pinned Actions
+    runs-on: self-hosted
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
+      - name: Ensure SHA pinned actions
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
+        with:
+          allowlist: |
+            aws-actions/
+            docker/login-action
.github/workflows/test.yml (vendored, 115 changed lines)
@@ -1,30 +1,54 @@
 ---
 name: Test
 on:
-  pull_request:
-  push:
-    branches:
-      - master
-    paths-ignore:
-      - '**/README.md'
+  workflow_call:
 jobs:
   molecule:
     name: Molecule
-    runs-on: macos-12
+    runs-on: self-hosted

     strategy:
       matrix:
        scenario:
          - default
-          - ipv6
+          # - ipv6
          - single_node
+          - calico
+          - cilium
+          - kube-vip
      fail-fast: false
    env:
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"

    steps:
      - name: Check out the codebase
-        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      # these steps are necessary if not using ephemeral nodes
+      - name: Delete old Vagrant box versions
+        if: always() # do this even if a step before has failed
+        run: vagrant box prune --force
+
+      - name: Remove all local Vagrant boxes
+        if: always() # do this even if a step before has failed
+        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
+
+      - name: Remove all Virtualbox VMs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
+
+      - name: Remove all Virtualbox HDs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
+
+      - name: Remove all Virtualbox Networks
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
+
+      - name: Remove Virtualbox network config
+        if: always() # do this even if a step before has failed
+        run: sudo rm /etc/vbox/networks.conf || true

      - name: Configure VirtualBox
        run: |-
@@ -34,48 +58,69 @@ jobs:
          * fdad:bad:ba55::/64
          EOF

-      - name: Cache Vagrant boxes
-        uses: actions/cache@fd5de65bc895cf536527842281bea11763fefd77 # 3.0.8
-        with:
-          path: |
-            ~/.vagrant.d/boxes
-          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
-          restore-keys: |
-            vagrant-boxes
-
-      - name: Download Vagrant boxes for all scenarios
-        # To save some cache space, all scenarios share the same cache key.
-        # On the other hand, this means that the cache contents should be
-        # the same across all scenarios. This step ensures that.
-        run: ./.github/download-boxes.sh
-
      - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}
+          cache: 'pip' # caching pip dependencies
+
+      - name: Restore vagrant Boxes cache
+        uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
+        with:
+          path: ~/.vagrant.d/boxes
+          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
+          fail-on-cache-miss: true

      - name: Install dependencies
-        run: >-
-          python3 -m pip install --upgrade pip &&
+        run: |
+          echo "::group::Upgrade pip"
+          python3 -m pip install --upgrade pip
+          echo "::endgroup::"
+
+          echo "::group::Install Python requirements from requirements.txt"
          python3 -m pip install -r requirements.txt
+          echo "::endgroup::"

      - name: Test with molecule
        run: molecule test --scenario-name ${{ matrix.scenario }}
+        timeout-minutes: 90
        env:
          ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
          ANSIBLE_SSH_RETRIES: 4
-          ANSIBLE_TIMEOUT: 60
+          ANSIBLE_TIMEOUT: 120
          PY_COLORS: 1
          ANSIBLE_FORCE_COLOR: 1
+
+      # these steps are necessary if not using ephemeral nodes
+      - name: Delete old Vagrant box versions
+        if: always() # do this even if a step before has failed
+        run: vagrant box prune --force
+
+      - name: Remove all local Vagrant boxes
+        if: always() # do this even if a step before has failed
+        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
+
+      - name: Remove all Virtualbox VMs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
+
+      - name: Remove all Virtualbox HDs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
+
+      - name: Remove all Virtualbox Networks
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
+
+      - name: Remove Virtualbox network config
+        if: always() # do this even if a step before has failed
+        run: sudo rm /etc/vbox/networks.conf || true

      - name: Upload log files
        if: always() # do this even if a step before has failed
-        uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # 3.1.0
+        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # 4.3.1
        with:
          name: logs
          path: |
            ${{ runner.temp }}/logs
+          overwrite: true
-
-      - name: Delete old box versions
-        if: always() # do this even if a step before has failed
-        run: vagrant box prune --force
.gitignore (vendored, 3 changed lines)
@@ -1 +1,4 @@
 .env/
+*.log
+ansible.cfg
+kubeconfig
.pre-commit-config.yaml (new file, 35 lines)
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: requirements-txt-fixer
      - id: sort-simple-yaml
      - id: detect-private-key
      - id: check-merge-conflict
      - id: end-of-file-fixer
      - id: mixed-line-ending
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.33.0
    hooks:
      - id: yamllint
        args: [-c=.yamllint]
  - repo: https://github.com/ansible-community/ansible-lint.git
    rev: v6.22.2
    hooks:
      - id: ansible-lint
  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.9.0.6
    hooks:
      - id: shellcheck
  - repo: https://github.com/Lucas-C/pre-commit-hooks
    rev: v1.5.4
    hooks:
      - id: remove-crlf
      - id: remove-tabs
  - repo: https://github.com/sirosen/texthooks
    rev: 0.6.4
    hooks:
      - id: fix-smartquotes
.yamllint
@@ -6,4 +6,6 @@ rules:
     max: 120
     level: warning
   truthy:
-    allowed-values: ['true', 'false', 'yes', 'no']
+    allowed-values: ['true', 'false']
+ignore:
+  - galaxy.yml
README.md (60 changed lines)
@@ -4,13 +4,13 @@

 This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`.

-This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.chipzoller.dev/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
+This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.io/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.

 If you want more context on how this works, see:

-📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
+📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)

-📺 [Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
+📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)

 ## 📖 k3s Ansible Playbook

@@ -28,14 +28,14 @@ on processor architecture:

 ## ✅ System requirements

-- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- Control Node (the machine you are running `ansible` commands on) must have Ansible 2.11+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://technotim.live/posts/ansible-automation/).

+- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
+
 - [`netaddr` package](https://pypi.org/project/netaddr/) must be available to Ansible. If you have installed Ansible via apt, this is already taken care of. If you have installed Ansible via `pip`, make sure to install `netaddr` into the respective virtual environment.

 - `server` and `agent` nodes should have passwordless SSH access, if not you can supply arguments to provide credentials `--ask-pass --ask-become-pass` to each command.

-- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml`
-
 ## 🚀 Getting Started

 ### 🍴 Preparation
@@ -67,6 +67,8 @@ node

 If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).

+Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
+
 This requires at least k3s version `1.19.1` however the version is configurable by using the `k3s_version` variable.

 If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.
@@ -94,12 +96,26 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
 To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:

 ```bash
-scp debian@master_ip:~/.kube/config ~/.kube/config
+scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
 ```
+If you get a file Permission denied error, go into the node and temporarily run:
+```bash
+sudo chmod 777 /etc/rancher/k3s/k3s.yaml
+```
+Then copy with the scp command and reset the permissions back with:
+```bash
+sudo chmod 600 /etc/rancher/k3s/k3s.yaml
+```
+
+You'll then want to modify the config to point to the master IP by running:
+```bash
+sudo nano ~/.kube/config
+```
+Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`

 ### 🔨 Testing your cluster

-See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
+See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).

 ### Troubleshooting

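A side note on the kubeconfig steps added above: the same copy-and-repoint can be done in one play instead of scp plus manual editing. A minimal sketch, not part of the repo; the modules (`ansible.builtin.fetch`, `ansible.builtin.replace`) are real, but the play, host pattern, and VIP value are assumptions based on the sample inventory:

```yaml
---
# Illustrative only: fetch the kubeconfig from the first master and point it
# at the apiserver VIP, mirroring the manual scp + nano steps above.
- name: Grab kubeconfig
  hosts: master[0]
  become: true
  tasks:
    - name: Copy /etc/rancher/k3s/k3s.yaml to the control machine
      ansible.builtin.fetch:
        src: /etc/rancher/k3s/k3s.yaml
        dest: "{{ lookup('env', 'HOME') }}/.kube/config"
        flat: true

    - name: Replace the loopback address with the VIP
      delegate_to: localhost
      become: false
      ansible.builtin.replace:
        path: "{{ lookup('env', 'HOME') }}/.kube/config"
        regexp: 'https://127\.0\.0\.1:6443'
        replace: 'https://192.168.30.222:6443'  # your apiserver_endpoint
```

Because `fetch` runs as root on the node and writes on the controller, this also sidesteps the chmod 777/600 dance described above.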
@@ -112,9 +128,35 @@ It is run automatically in CI, but you can also run the tests locally.
 This might be helpful for quick feedback in a few cases.
 You can find more information about it [here](molecule/README.md).

+### Pre-commit Hooks
+
+This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
+
+## 🌌 Ansible Galaxy
+
+This collection can now be used in larger ansible projects.
+
+Instructions:
+
+- create or modify a file `collections/requirements.yml` in your project
+
+```yml
+collections:
+  - name: ansible.utils
+  - name: community.general
+  - name: ansible.posix
+  - name: kubernetes.core
+  - name: https://github.com/techno-tim/k3s-ansible.git
+    type: git
+    version: master
+```
+
+- install via `ansible-galaxy collection install -r ./collections/requirements.yml`
+- every role is now available via the prefix `techno_tim.k3s_ansible.` e.g. `techno_tim.k3s_ansible.lxc`
+
 ## Thanks 🤝

-This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and tanks to these repos for code and ideas:
+This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:

 - [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible)
 - [geerlingguy/turing-pi-cluster](https://github.com/geerlingguy/turing-pi-cluster)
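To make the new Ansible Galaxy section concrete: once the collection is installed, its roles are referenced by their fully qualified names. A minimal sketch; `lxc` is the only role name the README confirms, and the play itself is illustrative:

```yaml
---
# Illustrative consumer playbook, not part of the repo.
- name: Use a role from the techno_tim.k3s_ansible collection
  hosts: proxmox
  become: true
  roles:
    - role: techno_tim.k3s_ansible.lxc
```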
ansible.cfg (deleted, 12 lines)
@@ -1,12 +0,0 @@
-[defaults]
-nocows = True
-roles_path = ./roles
-inventory = ./hosts.ini
-
-remote_tmp = $HOME/.ansible/tmp
-local_tmp = $HOME/.ansible/tmp
-pipelining = True
-become = True
-host_key_checking = False
-deprecation_warnings = False
-callback_whitelist = profile_tasks
ansible.example.cfg (new file, 2 lines)
[defaults]
inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file
deploy.sh
@@ -1,3 +1,3 @@
 #!/bin/bash

-ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook site.yml
galaxy.yml (new file, 81 lines)
### REQUIRED
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
# underscores or numbers and cannot contain consecutive underscores
namespace: techno_tim

# The name of the collection. Has the same character restrictions as 'namespace'
name: k3s_ansible

# The version of the collection. Must be compatible with semantic versioning
version: 1.0.0

# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md

# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
# @nicks:irc/im.site#channel'
authors:
  - your name <example@domain.com>


### OPTIONAL but strongly recommended
# A short summary description of the collection
description: >
  The easiest way to bootstrap a self-hosted High Availability Kubernetes
  cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB,
  and more.

# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
license:
  - Apache-2.0


# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
# requirements as 'namespace' and 'name'
tags:
  - etcd
  - high-availability
  - k8s
  - k3s
  - k3s-cluster
  - kube-vip
  - kubernetes
  - metallb
  - rancher

# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies:
  ansible.utils: '*'
  ansible.posix: '*'
  community.general: '*'
  kubernetes.core: '*'

# The URL of the originating SCM repository
repository: https://github.com/techno-tim/k3s-ansible

# The URL to any online docs
documentation: https://github.com/techno-tim/k3s-ansible

# The URL to the homepage of the collection/project
homepage: https://www.youtube.com/watch?v=CbkEWcUZ7zM

# The URL to the collection issue tracker
issues: https://github.com/techno-tim/k3s-ansible/issues

# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
# and '.git' are always filtered. Mutually exclusive with 'manifest'
build_ignore: []

# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
# list of MANIFEST.in style
# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
# with 'build_ignore'
# manifest: null
inventory/sample/group_vars/all.yml
@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.24.6+k3s1
+k3s_version: v1.29.2+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
@@ -10,6 +10,30 @@ system_timezone: "Your/Timezone"
 # interface which will be used for flannel
 flannel_iface: "eth0"

+# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
+# calico_iface: "eth0"
+calico_ebpf: false # use eBPF dataplane instead of iptables
+calico_tag: "v3.27.2" # calico version tag
+
+# uncomment cilium_iface to use cilium cni instead of flannel or calico
+# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
+# cilium_iface: "eth0"
+cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
+cilium_tag: "v1.15.2" # cilium version tag
+cilium_hubble: true # enable hubble observability relay and ui
+
+# if using calico or cilium, you may specify the cluster pod cidr pool
+cluster_cidr: "10.52.0.0/16"
+
+# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
+cilium_bgp: false
+
+# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
+cilium_bgp_my_asn: "64513"
+cilium_bgp_peer_asn: "64512"
+cilium_bgp_peer_address: "192.168.30.1"
+cilium_bgp_lb_cidr: "192.168.31.0/24" # cidr for cilium loadbalancer ipam
+
 # apiserver_endpoint is virtual ip-address which will be configured on each master
 apiserver_endpoint: "192.168.30.222"

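For orientation, the comments above imply that CNI selection is driven purely by which `*_iface` variable is defined. A usage sketch, with values assumed rather than taken from the diff, for switching the sample cluster from flannel to Cilium:

```yaml
# Hypothetical excerpt of inventory/my-cluster/group_vars/all.yml:
# defining cilium_iface is what switches the playbook from flannel to cilium.
cilium_iface: "eth0"
cilium_mode: "native"   # nodes share an L2 subnet
cilium_tag: "v1.15.2"
cilium_hubble: true     # keep the Hubble relay and UI
```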
@@ -20,32 +44,128 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
 # The IP on which the node is reachable in the cluster.
 # Here, a sensible default is provided, you can still override
 # it for each of your hosts, though.
-k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
+k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}"

 # Disable the taint manually by setting: k3s_master_taint = false
 k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"

 # these arguments are recommended for servers as well as agents:
 extra_args: >-
-  --flannel-iface={{ flannel_iface }}
+  {{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
   --node-ip={{ k3s_node_ip }}

 # change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
+# the contents of the if block is also required if using calico or cilium
 extra_server_args: >-
   {{ extra_args }}
   {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
+  {% if calico_iface is defined or cilium_iface is defined %}
+  --flannel-backend=none
+  --disable-network-policy
+  --cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
+  {% endif %}
   --tls-san {{ apiserver_endpoint }}
   --disable servicelb
   --disable traefik

 extra_agent_args: >-
   {{ extra_args }}

 # image tag for kube-vip
-kube_vip_tag_version: "v0.5.5"
+kube_vip_tag_version: "v0.7.2"

+# tag for kube-vip-cloud-provider manifest
+# kube_vip_cloud_provider_tag_version: "main"
+
+# kube-vip ip range for load balancer
+# (uncomment to use kube-vip for services instead of MetalLB)
+# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
+
+# metallb type frr or native
+metal_lb_type: "native"
+
+# metallb mode layer2 or bgp
+metal_lb_mode: "layer2"
+
+# bgp options
+# metal_lb_bgp_my_asn: "64513"
+# metal_lb_bgp_peer_asn: "64512"
+# metal_lb_bgp_peer_address: "192.168.30.1"
+
 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.13.6"
-metal_lb_controller_tag_version: "v0.13.6"
+metal_lb_speaker_tag_version: "v0.14.3"
+metal_lb_controller_tag_version: "v0.14.3"

 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"
+
+# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
+# in your hosts.ini file.
+# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
+# Most notably, your containers must be privileged, and must not have nesting set to true.
+# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
+# containers are significantly more resource efficient compared to full VMs.
+# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
+# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
+# VMs would use a significant portion of your available resources.
+proxmox_lxc_configure: false
+# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
+# set this value to some-user
+proxmox_lxc_ssh_user: root
+# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes
+proxmox_lxc_ct_ids:
+  - 200
+  - 201
+  - 202
+  - 203
+  - 204
+
+# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
+# (harbor / nexus / docker's official registry / etc).
+# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
+# or air-gapped environments where your nodes don't have internet access after the initial setup
+# (which is still needed for downloading the k3s binary and such).
+# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
+custom_registries: false
+# The registries can be authenticated or anonymous, depending on your registry server configuration.
+# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
+#   configs:
+#     "registry.domain.com":
+#       auth:
+#         username: yourusername
+#         password: yourpassword
+# The following is an example that pulls all images used in this playbook through your private registries.
+# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
+# in your deployments.
+# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
+# you can just remove those from the mirrors: section.
+custom_registries_yaml: |
+  mirrors:
+    docker.io:
+      endpoint:
+        - "https://registry.domain.com/v2/dockerhub"
+    quay.io:
+      endpoint:
+        - "https://registry.domain.com/v2/quayio"
+    ghcr.io:
+      endpoint:
+        - "https://registry.domain.com/v2/ghcrio"
+    registry.domain.com:
+      endpoint:
+        - "https://registry.domain.com"
+
+  configs:
+    "registry.domain.com":
+      auth:
+        username: yourusername
+        password: yourpassword
+
+# On some distros like Diet Pi, there is no dbus installed. dbus required by the default reboot command.
+# Uncomment if you need a custom reboot command
+# custom_reboot_command: /usr/sbin/shutdown -r now
+
+# Only enable and configure these if you access the internet through a proxy
+# proxy_env:
+#   HTTP_PROXY: "http://proxy.domain.local:3128"
+#   HTTPS_PROXY: "http://proxy.domain.local:3128"
+#   NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
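To make the conditional templating in the hunk above concrete, here is roughly what the new `extra_server_args` evaluates to in each mode. Illustrative only, with values taken from the sample defaults and the node IP left symbolic:

```yaml
# Illustrative renderings of extra_server_args, not part of the diff.
#
# Flannel (neither calico_iface nor cilium_iface defined):
#   --flannel-iface=eth0 --node-ip=<node ip>
#   --node-taint node-role.kubernetes.io/master=true:NoSchedule   (when agent nodes exist)
#   --tls-san 192.168.30.222 --disable servicelb --disable traefik
#
# Calico or Cilium (the respective *_iface defined):
#   --node-ip=<node ip>
#   --node-taint node-role.kubernetes.io/master=true:NoSchedule   (when agent nodes exist)
#   --flannel-backend=none --disable-network-policy --cluster-cidr=10.52.0.0/16
#   --tls-san 192.168.30.222 --disable servicelb --disable traefik
#
# k3s_node_ip resolves the address of cilium_iface if set, else calico_iface,
# else flannel_iface, matching the default() chain in the new expression.
```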
inventory/sample/group_vars/proxmox.yml (new file, 2 lines)
---
ansible_user: '{{ proxmox_lxc_ssh_user }}'
inventory/sample/hosts.ini
@@ -7,6 +7,11 @@
 192.168.30.41
 192.168.30.42
+
+# only required if proxmox_lxc_configure: true
+# must contain all proxmox instances that have a master or worker node
+# [proxmox]
+# 192.168.30.43

 [k3s_cluster:children]
 master
 node
molecule/README.md
@@ -13,6 +13,12 @@ We have these scenarios:
   To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
 - **single_node**:
   Very similar to the default scenario, but uses only a single node for all cluster functionality.
+- **calico**:
+  The same as single node, but uses calico cni instead of flannel.
+- **cilium**:
+  The same as single node, but uses cilium cni instead of flannel.
+- **kube-vip**:
+  The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB.

 ## How to execute

molecule/calico/molecule.yml (new file, 49 lines)
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.62
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
molecule/calico/overrides.yml
Normal file
16
molecule/calico/overrides.yml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
- name: Apply overrides
|
||||||
|
hosts: all
|
||||||
|
tasks:
|
||||||
|
- name: Override host variables
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
# See:
|
||||||
|
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
|
||||||
|
calico_iface: eth1
|
||||||
|
|
||||||
|
# The test VMs might be a bit slow, so we give them more time to join the cluster:
|
||||||
|
retry_count: 45
|
||||||
|
|
||||||
|
# Make sure that our IP ranges do not collide with those of the other scenarios
|
||||||
|
apiserver_endpoint: "192.168.30.224"
|
||||||
|
metal_lb_ip_range: "192.168.30.100-192.168.30.109"
|
||||||
molecule/cilium/molecule.yml (new file, 49 lines)
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.63
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
molecule/cilium/overrides.yml (new file, 16 lines)
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        cilium_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: "192.168.30.225"
        metal_lb_ip_range: "192.168.30.110-192.168.30.119"
molecule/default/molecule.yml
@@ -3,58 +3,77 @@ dependency:
   name: galaxy
 driver:
   name: vagrant
-.platform_presets:
-  - &control
-    memory: 2048
-    cpus: 2
-    groups:
-      - k3s_cluster
-      - master
-  - &node
-    memory: 2048
-    cpus: 2
-    groups:
-      - k3s_cluster
-      - node
-  - &debian
-    box: generic/debian11
-  - &rocky
-    box: generic/rocky9
-  - &ubuntu
-    box: generic/ubuntu2204
-    config_options:
-      # We currently can not use public-key based authentication on Ubuntu 22.04,
-      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
 platforms:
-  - <<: [*control, *ubuntu]
-    name: control1
+  - name: control1
+    box: generic/ubuntu2204
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
     interfaces:
       - network_name: private_network
         ip: 192.168.30.38
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+
-  - <<: [*control, *debian]
-    name: control2
+  - name: control2
+    box: generic/debian12
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
     interfaces:
       - network_name: private_network
         ip: 192.168.30.39
+
-  - <<: [*control, *rocky]
-    name: control3
+  - name: control3
+    box: generic/rocky9
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
     interfaces:
       - network_name: private_network
         ip: 192.168.30.40
+
-  - <<: [*node, *ubuntu]
-    name: node1
+  - name: node1
+    box: generic/ubuntu2204
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - node
     interfaces:
       - network_name: private_network
         ip: 192.168.30.41
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+
-  - <<: [*node, *rocky]
-    name: node2
+  - name: node2
+    box: generic/rocky9
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - node
     interfaces:
       - network_name: private_network
         ip: 192.168.30.42

 provisioner:
   name: ansible
+  env:
+    ANSIBLE_VERBOSITY: 1
   playbooks:
     converge: ../resources/converge.yml
     side_effect: ../resources/reset.yml
@@ -65,7 +84,6 @@ provisioner:
 scenario:
   test_sequence:
     - dependency
-    - lint
     - cleanup
     - destroy
     - syntax
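The molecule.yml rewrite above drops the `.platform_presets` block, which relied on YAML anchors and merge keys, in favor of fully inlined platform entries. For reference, a minimal illustration of the removed pattern; this is not repo code:

```yaml
# YAML merge keys: '<<' merges the referenced mappings into the current one.
.presets:
  - &control            # anchor a reusable mapping
    memory: 2048
    cpus: 2
  - &ubuntu
    box: generic/ubuntu2204
platforms:
  - <<: [*control, *ubuntu]   # equivalent to writing all the keys inline
    name: control1
```

Inlining trades brevity for readability and tooling compatibility, since merge keys are a YAML 1.1 feature that some parsers and linters handle inconsistently.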
molecule/default/overrides.yml
@@ -4,7 +4,8 @@
   tasks:
     - name: Override host variables
       ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
         flannel_iface: eth1

         # The test VMs might be a bit slow, so we give them more time to join the cluster:
@@ -17,6 +17,6 @@
       # and security needs.
       ansible.builtin.systemd:
         name: firewalld
-        enabled: no
+        enabled: false
         state: stopped
       become: true
molecule/ipv6/host_vars/control2.yml (new file, 3 lines)
---
node_ipv4: 192.168.123.12
node_ipv6: fdad:bad:ba55::de:12
molecule/ipv6/molecule.yml
@@ -3,39 +3,58 @@ dependency:
   name: galaxy
 driver:
   name: vagrant
-.platform_presets:
-  - &control
-    memory: 2048
-    cpus: 2
-    groups:
-      - k3s_cluster
-      - master
-  - &node
-    memory: 2048
-    cpus: 2
-    groups:
-      - k3s_cluster
-      - node
-  - &ubuntu
-    box: generic/ubuntu2204
-    config_options:
-      # We currently can not use public-key based authentication on Ubuntu 22.04,
-      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
 platforms:
-  - <<: [*control, *ubuntu]
-    name: control1
+  - name: control1
+    box: generic/ubuntu2204
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
     interfaces:
       - network_name: private_network
         ip: fdad:bad:ba55::de:11
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+
+  - name: control2
+    box: generic/ubuntu2204
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: fdad:bad:ba55::de:12
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+
-  - <<: [*node, *ubuntu]
-    name: node1
+  - name: node1
+    box: generic/ubuntu2204
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - node
     interfaces:
       - network_name: private_network
         ip: fdad:bad:ba55::de:21
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
 provisioner:
   name: ansible
+  env:
+    ANSIBLE_VERBOSITY: 1
   playbooks:
     converge: ../resources/converge.yml
     side_effect: ../resources/reset.yml
@@ -46,7 +65,6 @@ provisioner:
 scenario:
   test_sequence:
     - dependency
-    - lint
     - cleanup
     - destroy
     - syntax
@@ -4,9 +4,15 @@
|
|||||||
tasks:
|
tasks:
|
||||||
- name: Override host variables (1/2)
|
- name: Override host variables (1/2)
|
||||||
ansible.builtin.set_fact:
|
ansible.builtin.set_fact:
|
||||||
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
|
# See:
|
||||||
|
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
|
||||||
flannel_iface: eth1
|
flannel_iface: eth1
|
||||||
|
|
||||||
|
# In this scenario, we have multiple interfaces that the VIP could be
|
||||||
|
# broadcasted on. Since we have assigned a dedicated private network
|
||||||
|
# here, let's make sure that it is used.
|
||||||
|
kube_vip_iface: eth1
|
||||||
|
|
||||||
# The test VMs might be a bit slow, so we give them more time to join the cluster:
|
# The test VMs might be a bit slow, so we give them more time to join the cluster:
|
||||||
retry_count: 45
|
retry_count: 45
|
||||||
|
|
||||||
|
|||||||
49
molecule/kube-vip/molecule.yml
Normal file
49
molecule/kube-vip/molecule.yml
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
---
|
||||||
|
dependency:
|
||||||
|
name: galaxy
|
||||||
|
driver:
|
||||||
|
name: vagrant
|
||||||
|
platforms:
|
||||||
|
- name: control1
|
||||||
|
box: generic/ubuntu2204
|
||||||
|
memory: 4096
|
||||||
|
cpus: 4
|
||||||
|
config_options:
|
||||||
|
# We currently can not use public-key based authentication on Ubuntu 22.04,
|
||||||
|
# see: https://github.com/chef/bento/issues/1405
|
||||||
|
ssh.username: "vagrant"
|
||||||
|
ssh.password: "vagrant"
|
||||||
|
groups:
|
||||||
|
- k3s_cluster
|
||||||
|
- master
|
||||||
|
interfaces:
|
||||||
|
- network_name: private_network
|
||||||
|
ip: 192.168.30.62
|
||||||
|
provisioner:
|
||||||
|
name: ansible
|
||||||
|
env:
|
||||||
|
ANSIBLE_VERBOSITY: 1
|
||||||
|
playbooks:
|
||||||
|
converge: ../resources/converge.yml
|
||||||
|
side_effect: ../resources/reset.yml
|
||||||
|
verify: ../resources/verify.yml
|
||||||
|
inventory:
|
||||||
|
links:
|
||||||
|
group_vars: ../../inventory/sample/group_vars
|
||||||
|
scenario:
|
||||||
|
test_sequence:
|
||||||
|
- dependency
|
||||||
|
- cleanup
|
||||||
|
- destroy
|
||||||
|
- syntax
|
||||||
|
- create
|
||||||
|
- prepare
|
||||||
|
- converge
|
||||||
|
# idempotence is not possible with the playbook in its current form.
|
||||||
|
- verify
|
||||||
|
# We are repurposing side_effect here to test the reset playbook.
|
||||||
|
# This is why we do not run it before verify (which tests the cluster),
|
||||||
|
# but after the verify step.
|
||||||
|
- side_effect
|
||||||
|
- cleanup
|
||||||
|
- destroy
|
||||||
17
molecule/kube-vip/overrides.yml
Normal file
17
molecule/kube-vip/overrides.yml
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
---
|
||||||
|
- name: Apply overrides
|
||||||
|
hosts: all
|
||||||
|
tasks:
|
||||||
|
- name: Override host variables
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
# See:
|
||||||
|
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
|
||||||
|
flannel_iface: eth1
|
||||||
|
|
||||||
|
# The test VMs might be a bit slow, so we give them more time to join the cluster:
|
||||||
|
retry_count: 45
|
||||||
|
|
||||||
|
# Make sure that our IP ranges do not collide with those of the other scenarios
|
||||||
|
apiserver_endpoint: "192.168.30.225"
|
||||||
|
# Use kube-vip instead of MetalLB
|
||||||
|
kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"
|
||||||
@@ -2,4 +2,4 @@
|
|||||||
- name: Verify
|
- name: Verify
|
||||||
hosts: all
|
hosts: all
|
||||||
roles:
|
roles:
|
||||||
- verify/from_outside
|
- verify_from_outside
|
||||||
|
|||||||
@@ -6,4 +6,4 @@ outside_host: localhost
|
|||||||
testing_namespace: molecule-verify-from-outside
|
testing_namespace: molecule-verify-from-outside
|
||||||
|
|
||||||
# The directory in which the example manifests reside
|
# The directory in which the example manifests reside
|
||||||
example_manifests_path: ../../../../example
|
example_manifests_path: ../../../example
|
||||||
@@ -34,14 +34,14 @@
|
|||||||
|
|
||||||
- name: Assert that the nginx welcome page is available
|
- name: Assert that the nginx welcome page is available
|
||||||
ansible.builtin.uri:
|
ansible.builtin.uri:
|
||||||
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
|
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
|
||||||
return_content: yes
|
return_content: true
|
||||||
register: result
|
register: result
|
||||||
failed_when: "'Welcome to nginx!' not in result.content"
|
failed_when: "'Welcome to nginx!' not in result.content"
|
||||||
vars:
|
vars:
|
||||||
ip: >-
|
ip: >-
|
||||||
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
|
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
|
||||||
port: >-
|
port_: >-
|
||||||
{{ nginx_services.resources[0].spec.ports[0].port }}
|
{{ nginx_services.resources[0].spec.ports[0].port }}
|
||||||
# Deactivated linter rules:
|
# Deactivated linter rules:
|
||||||
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
|
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
|
||||||
@@ -21,6 +21,8 @@ platforms:
|
|||||||
ip: 192.168.30.50
|
ip: 192.168.30.50
|
||||||
provisioner:
|
provisioner:
|
||||||
name: ansible
|
name: ansible
|
||||||
|
env:
|
||||||
|
ANSIBLE_VERBOSITY: 1
|
||||||
playbooks:
|
playbooks:
|
||||||
converge: ../resources/converge.yml
|
converge: ../resources/converge.yml
|
||||||
side_effect: ../resources/reset.yml
|
side_effect: ../resources/reset.yml
|
||||||
@@ -31,7 +33,6 @@ provisioner:
|
|||||||
scenario:
|
scenario:
|
||||||
test_sequence:
|
test_sequence:
|
||||||
- dependency
|
- dependency
|
||||||
- lint
|
|
||||||
- cleanup
|
- cleanup
|
||||||
- destroy
|
- destroy
|
||||||
- syntax
|
- syntax
|
||||||
|
|||||||
@@ -4,7 +4,8 @@
|
|||||||
tasks:
|
tasks:
|
||||||
- name: Override host variables
|
- name: Override host variables
|
||||||
ansible.builtin.set_fact:
|
ansible.builtin.set_fact:
|
||||||
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
|
# See:
|
||||||
|
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
|
||||||
flannel_iface: eth1
|
flannel_iface: eth1
|
||||||
|
|
||||||
# The test VMs might be a bit slow, so we give them more time to join the cluster:
|
# The test VMs might be a bit slow, so we give them more time to join the cluster:
|
||||||
|
|||||||
10
reboot.yml
Normal file
10
reboot.yml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
---
|
||||||
|
- name: Reboot k3s_cluster
|
||||||
|
hosts: k3s_cluster
|
||||||
|
gather_facts: true
|
||||||
|
tasks:
|
||||||
|
- name: Reboot the nodes (and Wait upto 5 mins max)
|
||||||
|
become: true
|
||||||
|
reboot:
|
||||||
|
reboot_command: "{{ custom_reboot_command | default(omit) }}"
|
||||||
|
reboot_timeout: 300
|
||||||
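The new playbook can be invoked on its own; a minimal usage sketch, assuming the inventory path this repo uses elsewhere (the -e value is a hypothetical example):

    # Reboot every node in the k3s_cluster group, waiting up to 5 minutes each:
    ansible-playbook reboot.yml -i inventory/my-cluster/hosts.ini

    # Optionally override the reboot command (hypothetical value):
    ansible-playbook reboot.yml -i inventory/my-cluster/hosts.ini -e custom_reboot_command='systemctl reboot'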
requirements.in (new file)
@@ -0,0 +1,10 @@
+ansible-core>=2.16.2
+jmespath>=1.0.1
+jsonpatch>=1.33
+kubernetes>=29.0.0
+molecule-plugins[vagrant]
+molecule>=6.0.3
+netaddr>=0.10.1
+pre-commit>=3.6.0
+pre-commit-hooks>=4.5.0
+pyyaml>=6.0.1
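The pinned requirements.txt that follows is generated from this file, per the header the diff introduces; a sketch of the regeneration workflow, assuming pip-tools is installed in the active virtualenv:

    pip install pip-tools
    pip-compile requirements.in   # rewrites requirements.txt with pinned versions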
requirements.txt
@@ -1,71 +1,169 @@
-ansible-compat==2.2.1
-ansible-core==2.13.5
-ansible-lint==6.8.2
-arrow==1.2.3
-attrs==22.1.0
-binaryornot==0.4.4
-black==22.10.0
-bracex==2.3.post1
-cachetools==5.2.0
-Cerberus==1.3.2
-certifi==2022.9.24
-cffi==1.15.1
-chardet==5.0.0
-charset-normalizer==2.1.1
-click==8.1.3
-click-help-colors==0.9.1
-commonmark==0.9.1
-cookiecutter==2.1.1
-cryptography==38.0.1
-distro==1.8.0
-enrich==1.2.7
-filelock==3.8.0
-google-auth==2.12.0
-idna==3.4
-importlib-resources==5.10.0
-Jinja2==3.1.2
-jinja2-time==0.2.0
-jmespath==1.0.1
-jsonpatch==1.32
-jsonpointer==2.3
-jsonschema==4.16.0
-kubernetes==24.2.0
-MarkupSafe==2.1.1
-molecule==4.0.1
-molecule-vagrant==1.0.0
-mypy-extensions==0.4.3
-netaddr==0.8.0
-oauthlib==3.2.1
-packaging==21.3
-pathspec==0.10.1
-pkgutil-resolve-name==1.3.10
-platformdirs==2.5.2
-pluggy==1.0.0
-pyasn1==0.4.8
-pyasn1-modules==0.2.8
-pycparser==2.21
-Pygments==2.13.0
-pyparsing==3.0.9
-pyrsistent==0.18.1
-python-dateutil==2.8.2
-python-slugify==6.1.2
-python-vagrant==1.0.0
-PyYAML==6.0
-requests==2.28.1
-requests-oauthlib==1.3.1
-resolvelib==0.8.1
-rich==12.6.0
-rsa==4.9
-ruamel.yaml==0.17.21
-ruamel.yaml.clib==0.2.6
-selinux==0.2.1
-six==1.16.0
-subprocess-tee==0.3.5
-text-unidecode==1.3
-tomli==2.0.1
-typing-extensions==4.4.0
-urllib3==1.26.12
-wcmatch==8.4.1
-websocket-client==1.4.1
-yamllint==1.28.0
-zipp==3.9.0
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+#    pip-compile requirements.in
+#
+ansible-compat==4.1.11
+    # via molecule
+ansible-core==2.16.4
+    # via
+    #   -r requirements.in
+    #   ansible-compat
+    #   molecule
+attrs==23.2.0
+    # via
+    #   jsonschema
+    #   referencing
+bracex==2.4
+    # via wcmatch
+cachetools==5.3.2
+    # via google-auth
+certifi==2023.11.17
+    # via
+    #   kubernetes
+    #   requests
+cffi==1.16.0
+    # via cryptography
+cfgv==3.4.0
+    # via pre-commit
+charset-normalizer==3.3.2
+    # via requests
+click==8.1.7
+    # via
+    #   click-help-colors
+    #   molecule
+click-help-colors==0.9.4
+    # via molecule
+cryptography==41.0.7
+    # via ansible-core
+distlib==0.3.8
+    # via virtualenv
+enrich==1.2.7
+    # via molecule
+filelock==3.13.1
+    # via virtualenv
+google-auth==2.26.2
+    # via kubernetes
+identify==2.5.33
+    # via pre-commit
+idna==3.6
+    # via requests
+jinja2==3.1.3
+    # via
+    #   ansible-core
+    #   molecule
+jmespath==1.0.1
+    # via -r requirements.in
+jsonpatch==1.33
+    # via -r requirements.in
+jsonpointer==2.4
+    # via jsonpatch
+jsonschema==4.21.1
+    # via
+    #   ansible-compat
+    #   molecule
+jsonschema-specifications==2023.12.1
+    # via jsonschema
+kubernetes==29.0.0
+    # via -r requirements.in
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==2.1.4
+    # via jinja2
+mdurl==0.1.2
+    # via markdown-it-py
+molecule==6.0.3
+    # via
+    #   -r requirements.in
+    #   molecule-plugins
+molecule-plugins[vagrant]==23.5.3
+    # via -r requirements.in
+netaddr==0.10.1
+    # via -r requirements.in
+nodeenv==1.8.0
+    # via pre-commit
+oauthlib==3.2.2
+    # via
+    #   kubernetes
+    #   requests-oauthlib
+packaging==23.2
+    # via
+    #   ansible-compat
+    #   ansible-core
+    #   molecule
+platformdirs==4.1.0
+    # via virtualenv
+pluggy==1.3.0
+    # via molecule
+pre-commit==3.6.2
+    # via -r requirements.in
+pre-commit-hooks==4.5.0
+    # via -r requirements.in
+pyasn1==0.5.1
+    # via
+    #   pyasn1-modules
+    #   rsa
+pyasn1-modules==0.3.0
+    # via google-auth
+pycparser==2.21
+    # via cffi
+pygments==2.17.2
+    # via rich
+python-dateutil==2.8.2
+    # via kubernetes
+python-vagrant==1.0.0
+    # via molecule-plugins
+pyyaml==6.0.1
+    # via
+    #   -r requirements.in
+    #   ansible-compat
+    #   ansible-core
+    #   kubernetes
+    #   molecule
+    #   pre-commit
+referencing==0.32.1
+    # via
+    #   jsonschema
+    #   jsonschema-specifications
+requests==2.31.0
+    # via
+    #   kubernetes
+    #   requests-oauthlib
+requests-oauthlib==1.3.1
+    # via kubernetes
+resolvelib==1.0.1
+    # via ansible-core
+rich==13.7.0
+    # via
+    #   enrich
+    #   molecule
+rpds-py==0.17.1
+    # via
+    #   jsonschema
+    #   referencing
+rsa==4.9
+    # via google-auth
+ruamel-yaml==0.18.5
+    # via pre-commit-hooks
+ruamel-yaml-clib==0.2.8
+    # via ruamel-yaml
+six==1.16.0
+    # via
+    #   kubernetes
+    #   python-dateutil
+subprocess-tee==0.4.1
+    # via ansible-compat
+urllib3==2.1.0
+    # via
+    #   kubernetes
+    #   requests
+virtualenv==20.25.0
+    # via pre-commit
+wcmatch==8.5
+    # via molecule
+websocket-client==1.7.0
+    # via kubernetes
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools

reset.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook reset.yml

reset.yml
@@ -1,13 +1,25 @@
 ---
-- hosts: k3s_cluster
-  gather_facts: yes
-  become: yes
+- name: Reset k3s cluster
+  hosts: k3s_cluster
+  gather_facts: true
   roles:
     - role: reset
+      become: true
     - role: raspberrypi
+      become: true
       vars: {state: absent}
   post_tasks:
     - name: Reboot and wait for node to come back up
+      become: true
      reboot:
+        reboot_command: "{{ custom_reboot_command | default(omit) }}"
         reboot_timeout: 3600
+
+- name: Revert changes to Proxmox cluster
+  hosts: proxmox
+  gather_facts: true
+  become: true
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: reset_proxmox_lxc
+      when: proxmox_lxc_configure

(deleted file)
@@ -1,12 +0,0 @@
----
-ansible_user: root
-server_init_args: >-
-  {% if groups['master'] | length > 1 %}
-  {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
-  --cluster-init
-  {% else %}
-  --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
-  {% endif %}
-  --token {{ k3s_token }}
-  {% endif %}
-  {{ extra_server_args | default('') }}

(File diff suppressed because it is too large)

(deleted file)
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: metallb-system
-  labels:
-    app: metallb

(deleted file)
@@ -1,32 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-vip
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  annotations:
-    rbac.authorization.kubernetes.io/autoupdate: "true"
-  name: system:kube-vip-role
-rules:
-  - apiGroups: [""]
-    resources: ["services", "services/status", "nodes", "endpoints"]
-    verbs: ["list","get","watch", "update"]
-  - apiGroups: ["coordination.k8s.io"]
-    resources: ["leases"]
-    verbs: ["list", "get", "watch", "update", "create"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: system:kube-vip-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:kube-vip-role
-subjects:
-  - kind: ServiceAccount
-    name: kube-vip
-    namespace: kube-system

roles/k3s/node/defaults/main.yml (new file)
@@ -0,0 +1,3 @@
+---
+# Name of the master group
+group_name_master: master

(deleted file)
@@ -1,16 +0,0 @@
----
-
-- name: Copy K3s service file
-  template:
-    src: "k3s.service.j2"
-    dest: "{{ systemd_dir }}/k3s-node.service"
-    owner: root
-    group: root
-    mode: 0755
-
-- name: Enable and check K3s service
-  systemd:
-    name: k3s-node
-    daemon_reload: yes
-    state: restarted
-    enabled: yes

(deleted file)
@@ -1,3 +0,0 @@
----
-# Timeout to wait for MetalLB services to come up
-metal_lb_available_timeout: 120s

roles/k3s_agent/tasks/http_proxy.yml (new file)
@@ -0,0 +1,18 @@
+---
+- name: Create k3s-node.service.d directory
+  file:
+    path: '{{ systemd_dir }}/k3s-node.service.d'
+    state: directory
+    owner: root
+    group: root
+    mode: '0755'
+  when: proxy_env is defined
+
+- name: Copy K3s http_proxy conf file
+  template:
+    src: "http_proxy.conf.j2"
+    dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
+    owner: root
+    group: root
+    mode: '0755'
+  when: proxy_env is defined

roles/k3s_agent/tasks/main.yml (new file)
@@ -0,0 +1,36 @@
+---
+- name: Check for PXE-booted system
+  block:
+    - name: Check if system is PXE-booted
+      ansible.builtin.command:
+        cmd: cat /proc/cmdline
+      register: boot_cmdline
+      changed_when: false
+      check_mode: false
+
+    - name: Set fact for PXE-booted system
+      ansible.builtin.set_fact:
+        is_pxe_booted: "{{ 'root=/dev/nfs' in boot_cmdline.stdout }}"
+      when: boot_cmdline.stdout is defined
+
+- name: Include http_proxy configuration tasks
+  ansible.builtin.include_tasks: http_proxy.yml
+
+- name: Deploy K3s http_proxy conf
+  include_tasks: http_proxy.yml
+  when: proxy_env is defined
+
+- name: Configure the k3s service
+  ansible.builtin.template:
+    src: "k3s.service.j2"
+    dest: "{{ systemd_dir }}/k3s-node.service"
+    owner: root
+    group: root
+    mode: '0755'
+
+- name: Manage k3s service
+  ansible.builtin.systemd:
+    name: k3s-node
+    daemon_reload: true
+    state: restarted
+    enabled: true

roles/k3s_agent/templates/http_proxy.conf.j2 (new file)
@@ -0,0 +1,4 @@
+[Service]
+Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
+Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
+Environment=NO_PROXY={{ proxy_env.NO_PROXY }}
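This template only renders useful values when a proxy_env dict is defined for the host; a minimal sketch of what that might look like in group_vars (the proxy host and port are placeholder values):

    proxy_env:
      HTTP_PROXY: "http://proxy.example.com:3128"
      HTTPS_PROXY: "http://proxy.example.com:3128"
      NO_PROXY: "localhost,127.0.0.1,10.0.0.0/8"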
@@ -7,11 +7,14 @@ After=network-online.target
 Type=notify
 ExecStartPre=-/sbin/modprobe br_netfilter
 ExecStartPre=-/sbin/modprobe overlay
-ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
+# Conditional snapshotter based on PXE boot status
+ExecStart=/usr/local/bin/k3s agent \
+    --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 \
+    {% if is_pxe_booted | default(false) %}--snapshotter native \
+    {% endif %}--token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} \
+    {{ extra_agent_args | default("") }}
 KillMode=process
 Delegate=yes
-# Having non-zero Limit*s causes performance problems due to accounting overhead
-# in the kernel. We recommend using cgroups to do container-local accounting.
 LimitNOFILE=1048576
 LimitNPROC=infinity
 LimitCORE=infinity
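On a PXE-booted agent, the Jinja2 conditionals above would render roughly as follows (the endpoint and token are placeholder values; non-PXE hosts simply omit the --snapshotter flag):

    ExecStart=/usr/local/bin/k3s agent \
        --server https://192.168.30.222:6443 \
        --snapshotter native \
        --token <k3s_token> \
        <extra_agent_args>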
roles/k3s_custom_registries/defaults/main.yml (new file)
@@ -0,0 +1,6 @@
+---
+# Indicates whether custom registries for k3s should be configured
+# Possible values:
+# - present
+# - absent
+state: present

roles/k3s_custom_registries/tasks/main.yml (new file)
@@ -0,0 +1,17 @@
+---
+
+- name: Create directory /etc/rancher/k3s
+  file:
+    path: "/etc/{{ item }}"
+    state: directory
+    mode: '0755'
+  loop:
+    - rancher
+    - rancher/k3s
+
+- name: Insert registries into /etc/rancher/k3s/registries.yaml
+  blockinfile:
+    path: /etc/rancher/k3s/registries.yaml
+    block: "{{ custom_registries_yaml }}"
+    mode: '0600'
+    create: true
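custom_registries_yaml is inserted verbatim into k3s's registries.yaml, so it follows containerd's registry configuration format; a hedged sketch with placeholder endpoints and credentials:

    custom_registries_yaml: |
      mirrors:
        docker.io:
          endpoint:
            - "https://registry.example.com"
      configs:
        registry.example.com:
          auth:
            username: exampleuser
            password: examplepassword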
roles/k3s_server/defaults/main.yml (new file)
@@ -0,0 +1,20 @@
+---
+# If you want to explicitly define an interface that ALL control nodes
+# should use to propagate the VIP, define it here. Otherwise, kube-vip
+# will determine the right interface automatically at runtime.
+kube_vip_iface: null
+
+# Name of the master group
+group_name_master: master
+
+# yamllint disable rule:line-length
+server_init_args: >-
+  {% if groups[group_name_master | default('master')] | length > 1 %}
+  {% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
+  --cluster-init
+  {% else %}
+  --server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
+  {% endif %}
+  --token {{ k3s_token }}
+  {% endif %}
+  {{ extra_server_args | default('') }}
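Evaluated per host, the template above yields different arguments for the first master and the joining masters; roughly as follows (address and token are placeholders):

    # First master in an HA cluster:
    #   --cluster-init --token <k3s_token> <extra_server_args>
    # Every other master:
    #   --server https://192.168.30.38:6443 --token <k3s_token> <extra_server_args>
    # Single-master cluster: only <extra_server_args> is emitted.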
roles/k3s_server/tasks/http_proxy.yml (new file)
@@ -0,0 +1,18 @@
+---
+
+- name: Create k3s.service.d directory
+  file:
+    path: '{{ systemd_dir }}/k3s.service.d'
+    state: directory
+    owner: root
+    group: root
+    mode: '0755'
+
+
+- name: Copy K3s http_proxy conf file
+  template:
+    src: "http_proxy.conf.j2"
+    dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
+    owner: root
+    group: root
+    mode: '0755'

roles/k3s_server/tasks/kube-vip.yml (new file)
@@ -0,0 +1,27 @@
+---
+- name: Create manifests directory on first master
+  file:
+    path: /var/lib/rancher/k3s/server/manifests
+    state: directory
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: Download vip cloud provider manifest to first master
+  ansible.builtin.get_url:
+    url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml"  # noqa yaml[line-length]
+    dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: Copy kubevip configMap manifest to first master
+  template:
+    src: "kubevip.yaml.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

@@ -1,63 +1,40 @@
 ---
-- name: Clean previous runs of k3s-init
+
+- name: Stop k3s-init
   systemd:
     name: k3s-init
     state: stopped
   failed_when: false
 
-- name: Clean previous runs of k3s-init
+# k3s-init won't work if the port is already in use
+- name: Stop k3s
+  systemd:
+    name: k3s
+    state: stopped
+  failed_when: false
+
+- name: Clean previous runs of k3s-init  # noqa command-instead-of-module
+  # The systemd module does not support "reset-failed", so we need to resort to command.
   command: systemctl reset-failed k3s-init
   failed_when: false
   changed_when: false
-  args:
-    warn: false  # The ansible systemd module does not support reset-failed
 
-- name: Create manifests directory on first master
-  file:
-    path: /var/lib/rancher/k3s/server/manifests
-    state: directory
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+- name: Deploy K3s http_proxy conf
+  include_tasks: http_proxy.yml
+  when: proxy_env is defined
 
-- name: Copy vip rbac manifest to first master
-  template:
-    src: "vip.rbac.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+- name: Deploy vip manifest
+  include_tasks: vip.yml
 
-- name: Copy vip manifest to first master
-  template:
-    src: "vip.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+- name: Deploy metallb manifest
+  include_tasks: metallb.yml
+  tags: metallb
+  when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
 
-# these will be copied and installed now, then tested later and apply config
-- name: Copy metallb namespace to first master
-  template:
-    src: "metallb.namespace.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy metallb namespace to first master
-  template:
-    src: "metallb.crds.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+- name: Deploy kube-vip manifest
+  include_tasks: kube-vip.yml
+  tags: kubevip
+  when: kube_vip_lb_ip_range is defined
 
 - name: Init cluster inside the transient k3s-init service
   command:
@@ -65,17 +42,16 @@
       -p Restart=on-failure \
       --unit=k3s-init \
       k3s server {{ server_init_args }}"
-    creates: "{{ systemd_dir }}/k3s.service"
-  args:
-    warn: false  # The ansible systemd module does not support transient units
+    creates: "{{ systemd_dir }}/k3s-init.service"
 
 - name: Verification
+  when: not ansible_check_mode
   block:
     - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
       command:
         cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
       register: nodes
-      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
+      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length)  # yamllint disable-line rule:line-length
       retries: "{{ retry_count | default(20) }}"
       delay: 10
       changed_when: false
@@ -91,7 +67,6 @@
         name: k3s-init
         state: stopped
       failed_when: false
-  when: not ansible_check_mode
 
 - name: Copy K3s service file
   register: k3s_service
@@ -105,9 +80,9 @@
 - name: Enable and check K3s service
   systemd:
     name: k3s
-    daemon_reload: yes
+    daemon_reload: true
     state: restarted
-    enabled: yes
+    enabled: true
 
 - name: Wait for node-token
   wait_for:
@@ -139,24 +114,24 @@
 
 - name: Create directory .kube
   file:
-    path: ~{{ ansible_user }}/.kube
+    path: "{{ ansible_user_dir }}/.kube"
     state: directory
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: "u=rwx,g=rx,o="
 
 - name: Copy config file to user home directory
   copy:
     src: /etc/rancher/k3s/k3s.yaml
-    dest: ~{{ ansible_user }}/.kube/config
-    remote_src: yes
-    owner: "{{ ansible_user }}"
+    dest: "{{ ansible_user_dir }}/.kube/config"
+    remote_src: true
+    owner: "{{ ansible_user_id }}"
     mode: "u=rw,g=,o="
 
 - name: Configure kubectl cluster to {{ endpoint_url }}
   command: >-
     k3s kubectl config set-cluster default
       --server={{ endpoint_url }}
-      --kubeconfig ~{{ ansible_user }}/.kube/config
+      --kubeconfig {{ ansible_user_dir }}/.kube/config
   changed_when: true
   vars:
     endpoint_url: >-

roles/k3s_server/tasks/metallb.yml (new file)
@@ -0,0 +1,30 @@
+---
+- name: Create manifests directory on first master
+  file:
+    path: /var/lib/rancher/k3s/server/manifests
+    state: directory
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
+  ansible.builtin.get_url:
+    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml"  # noqa yaml[line-length]
+    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
+  ansible.builtin.replace:
+    path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
+    replace: "{{ item.to }}"
+  with_items:
+    - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
+      to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
+  loop_control:
+    label: "{{ item.change }} => {{ item.to }}"
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

roles/k3s_server/tasks/vip.yml (new file)
@@ -0,0 +1,27 @@
+---
+- name: Create manifests directory on first master
+  file:
+    path: /var/lib/rancher/k3s/server/manifests
+    state: directory
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: Download vip rbac manifest to first master
+  ansible.builtin.get_url:
+    url: "https://kube-vip.io/manifests/rbac.yaml"
+    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: Copy vip manifest to first master
+  template:
+    src: "vip.yaml.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

roles/k3s_server/templates/http_proxy.conf.j2 (new file)
@@ -0,0 +1,4 @@
+[Service]
+Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
+Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
+Environment=NO_PROXY={{ proxy_env.NO_PROXY }}

roles/k3s_server/templates/kubevip.yaml.j2 (new file)
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kubevip
+  namespace: kube-system
+data:
+{% if kube_vip_lb_ip_range is string %}
+{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
+{# => transform to list with single element #}
+{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
+{% endif %}
+  range-global: {{ kube_vip_lb_ip_range | join(',') }}
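With the range used by the kube-vip molecule scenario above, this template would render to a ConfigMap roughly like the following:

    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: kubevip
      namespace: kube-system
    data:
      range-global: 192.168.30.110-192.168.30.119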
@@ -30,8 +30,10 @@ spec:
           value: "true"
         - name: port
           value: "6443"
+{% if kube_vip_iface %}
         - name: vip_interface
-          value: {{ flannel_iface }}
+          value: {{ kube_vip_iface }}
+{% endif %}
         - name: vip_cidr
           value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
         - name: cp_enable
@@ -41,7 +43,7 @@ spec:
         - name: vip_ddns
           value: "false"
         - name: svc_enable
-          value: "false"
+          value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}"
         - name: vip_leaderelection
           value: "true"
         - name: vip_leaseduration

roles/k3s_server_post/defaults/main.yml (new file)
@@ -0,0 +1,6 @@
+---
+# Timeout to wait for MetalLB services to come up
+metal_lb_available_timeout: 240s
+
+# Name of the master group
+group_name_master: master

roles/k3s_server_post/tasks/calico.yml (new file)
@@ -0,0 +1,114 @@
+---
+- name: Deploy Calico to cluster
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+  run_once: true
+  block:
+    - name: Create manifests directory on first master
+      file:
+        path: /tmp/k3s
+        state: directory
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
+      ansible.builtin.get_url:
+        url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
+        dest: "/tmp/k3s/tigera-operator.yaml"
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: Copy Calico custom resources manifest to first master
+      ansible.builtin.template:
+        src: "calico.crs.j2"
+        dest: /tmp/k3s/custom-resources.yaml
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: Deploy or replace Tigera Operator
+      block:
+        - name: Deploy Tigera Operator
+          ansible.builtin.command:
+            cmd: kubectl create -f /tmp/k3s/tigera-operator.yaml
+          register: create_operator
+          changed_when: "'created' in create_operator.stdout"
+          failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
+      rescue:
+        - name: Replace existing Tigera Operator
+          ansible.builtin.command:
+            cmd: kubectl replace -f /tmp/k3s/tigera-operator.yaml
+          register: replace_operator
+          changed_when: "'replaced' in replace_operator.stdout"
+          failed_when: "'Error' in replace_operator.stderr"
+
+    - name: Wait for Tigera Operator resources
+      command: >-
+        k3s kubectl wait {{ item.type }}/{{ item.name }}
+        --namespace='tigera-operator'
+        --for=condition=Available=True
+        --timeout=30s
+      register: tigera_result
+      changed_when: false
+      until: tigera_result is succeeded
+      retries: 7
+      delay: 7
+      with_items:
+        - {name: tigera-operator, type: deployment}
+      loop_control:
+        label: "{{ item.type }}/{{ item.name }}"
+
+    - name: Deploy Calico custom resources
+      block:
+        - name: Deploy custom resources for Calico
+          ansible.builtin.command:
+            cmd: kubectl create -f /tmp/k3s/custom-resources.yaml
+          register: create_cr
+          changed_when: "'created' in create_cr.stdout"
+          failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
+      rescue:
+        - name: Apply new Calico custom resource manifest
+          ansible.builtin.command:
+            cmd: kubectl apply -f /tmp/k3s/custom-resources.yaml
+          register: apply_cr
+          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
+          failed_when: "'Error' in apply_cr.stderr"
+
+    - name: Wait for Calico system resources to be available
+      command: >-
+        {% if item.type == 'daemonset' %}
+        k3s kubectl wait pods
+        --namespace='{{ item.namespace }}'
+        --selector={{ item.selector }}
+        --for=condition=Ready
+        {% else %}
+        k3s kubectl wait {{ item.type }}/{{ item.name }}
+        --namespace='{{ item.namespace }}'
+        --for=condition=Available
+        {% endif %}
+        --timeout=30s
+      register: cr_result
+      changed_when: false
+      until: cr_result is succeeded
+      retries: 30
+      delay: 7
+      with_items:
+        - {name: calico-typha, type: deployment, namespace: calico-system}
+        - {name: calico-kube-controllers, type: deployment, namespace: calico-system}
+        - {name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system}
+        - {name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system}
+        - {name: calico-apiserver, type: deployment, namespace: calico-apiserver}
+      loop_control:
+        label: "{{ item.type }}/{{ item.name }}"
+
+    - name: Patch Felix configuration for eBPF mode
+      ansible.builtin.command:
+        cmd: >
+          kubectl patch felixconfiguration default
+          --type='merge'
+          --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
+      register: patch_result
+      changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
+      failed_when: "'Error' in patch_result.stderr"
+      when: calico_ebpf

roles/k3s_server_post/tasks/cilium.yml (new file)
@@ -0,0 +1,253 @@
+---
+- name: Prepare Cilium CLI on first master and deploy CNI
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+  run_once: true
+  block:
+    - name: Create tmp directory on first master
+      file:
+        path: /tmp/k3s
+        state: directory
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: Check if Cilium CLI is installed
+      ansible.builtin.command: cilium version
+      register: cilium_cli_installed
+      failed_when: false
+      changed_when: false
+      ignore_errors: true
+
+    - name: Check for Cilium CLI version in command output
+      set_fact:
+        installed_cli_version: >-
+          {{
+            cilium_cli_installed.stdout_lines
+            | join(' ')
+            | regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
+            | first
+            | default('unknown')
+          }}
+      when: cilium_cli_installed.rc == 0
+
+    - name: Get latest stable Cilium CLI version file
+      ansible.builtin.get_url:
+        url: "https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt"
+        dest: "/tmp/k3s/cilium-cli-stable.txt"
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: Read Cilium CLI stable version from file
+      ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
+      register: cli_ver
+      changed_when: false
+
+    - name: Log installed Cilium CLI version
+      ansible.builtin.debug:
+        msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"
+
+    - name: Log latest stable Cilium CLI version
+      ansible.builtin.debug:
+        msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"
+
+    - name: Determine if Cilium CLI needs installation or update
+      set_fact:
+        cilium_cli_needs_update: >-
+          {{
+            cilium_cli_installed.rc != 0 or
+            (cilium_cli_installed.rc == 0 and
+             installed_cli_version != cli_ver.stdout)
+          }}
+
+    - name: Install or update Cilium CLI
+      when: cilium_cli_needs_update
+      block:
+        - name: Set architecture variable
+          ansible.builtin.set_fact:
+            cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
+
+        - name: Download Cilium CLI and checksum
+          ansible.builtin.get_url:
+            url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
+            dest: "/tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}"
+            owner: root
+            group: root
+            mode: 0755
+          loop:
+            - ".tar.gz"
+            - ".tar.gz.sha256sum"
+          vars:
+            cilium_base_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}"
+
+        - name: Verify the downloaded tarball
+          ansible.builtin.shell: |
+            cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
+          args:
+            executable: /bin/bash
+          changed_when: false
+
+        - name: Extract Cilium CLI to /usr/local/bin
+          ansible.builtin.unarchive:
+            src: "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
+            dest: /usr/local/bin
+            remote_src: true
+
+        - name: Remove downloaded tarball and checksum file
+          ansible.builtin.file:
+            path: "{{ item }}"
+            state: absent
+          loop:
+            - "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
+            - "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum"
+
+    - name: Wait for connectivity to kube VIP
+      ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
+      register: ping_result
+      until: ping_result.rc == 0
+      retries: 21
+      delay: 1
+      ignore_errors: true
+      changed_when: false
+
+    - name: Fail if kube VIP not reachable
+      ansible.builtin.fail:
+        msg: "API endpoint {{ apiserver_endpoint }} is not reachable"
+      when: ping_result.rc != 0
+
+    - name: Test for existing Cilium install
+      ansible.builtin.command: k3s kubectl -n kube-system get daemonsets cilium
+      register: cilium_installed
+      failed_when: false
+      changed_when: false
+      ignore_errors: true
+
+    - name: Check existing Cilium install
+      when: cilium_installed.rc == 0
+      block:
+
+        - name: Check Cilium version
+          ansible.builtin.command: cilium version
+          register: cilium_version
+          failed_when: false
+          changed_when: false
+          ignore_errors: true
+
+        - name: Parse installed Cilium version
+          set_fact:
+            installed_cilium_version: >-
+              {{
+                cilium_version.stdout_lines
+                | join(' ')
+                | regex_findall('cilium image.+(\d+\.\d+\.\d+)')
+                | first
+                | default('unknown')
+              }}
+
+        - name: Determine if Cilium needs update
+          set_fact:
+            cilium_needs_update: >-
+              {{ 'v' + installed_cilium_version != cilium_tag }}
+
+        - name: Log result
+          ansible.builtin.debug:
+            msg: >
+              Installed Cilium version: {{ installed_cilium_version }},
+              Target Cilium version: {{ cilium_tag }},
+              Update needed: {{ cilium_needs_update }}
+
+    - name: Install Cilium
+      ansible.builtin.command: >-
+        {% if cilium_installed.rc != 0 %}
+        cilium install
+        {% else %}
+        cilium upgrade
+        {% endif %}
+        --version "{{ cilium_tag }}"
+        --helm-set operator.replicas="1"
+        {{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
+        --helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
+        {% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
+        --helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
+        {% endif %}
+        --helm-set k8sServiceHost="127.0.0.1"
+        --helm-set k8sServicePort="6444"
+        --helm-set routingMode={{ cilium_mode | default("native") }}
+        --helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
+        --helm-set kubeProxyReplacement={{ kube_proxy_replacement | default("true") }}
+        --helm-set bpf.masquerade={{ enable_bpf_masquerade | default("true") }}
+        --helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
+        --helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
+        --helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
+        --helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
+        {% if kube_proxy_replacement is not false %}
+        --helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm | default("maglev") }}
+        --helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }}
+        {% endif %}
+      environment:
+        KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
+      register: cilium_install_result
+      changed_when: cilium_install_result.rc == 0
+      when: cilium_installed.rc != 0 or cilium_needs_update
+
+    - name: Wait for Cilium resources
+      command: >-
+        {% if item.type == 'daemonset' %}
+        k3s kubectl wait pods
+        --namespace=kube-system
+        --selector='k8s-app=cilium'
+        --for=condition=Ready
+        {% else %}
+        k3s kubectl wait {{ item.type }}/{{ item.name }}
+        --namespace=kube-system
+        --for=condition=Available
+        {% endif %}
+        --timeout=30s
+      register: cr_result
+      changed_when: false
+      until: cr_result is succeeded
+      retries: 30
+      delay: 7
+      with_items:
+        - {name: cilium-operator, type: deployment}
+        - {name: cilium, type: daemonset, selector: 'k8s-app=cilium'}
+        - {name: hubble-relay, type: deployment, check_hubble: true}
+        - {name: hubble-ui, type: deployment, check_hubble: true}
+      loop_control:
+        label: "{{ item.type }}/{{ item.name }}"
+      when: >-
+        not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)
+
+    - name: Configure Cilium BGP
+      when: cilium_bgp
+      block:
+
+        - name: Copy BGP manifests to first master
+          ansible.builtin.template:
+            src: "cilium.crs.j2"
+            dest: /tmp/k3s/cilium-bgp.yaml
+            owner: root
+            group: root
+            mode: 0755
+
+        - name: Apply BGP manifests
+          ansible.builtin.command:
+            cmd: kubectl apply -f /tmp/k3s/cilium-bgp.yaml
+          register: apply_cr
+          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
+          failed_when: "'is invalid' in apply_cr.stderr"
+          ignore_errors: true
+
+        - name: Print error message if BGP manifests application fails
+          ansible.builtin.debug:
+            msg: "{{ apply_cr.stderr }}"
+          when: "'is invalid' in apply_cr.stderr"
+
+        - name: Test for BGP config resources
+          ansible.builtin.command: "{{ item }}"
+          loop:
+            - k3s kubectl get CiliumBGPPeeringPolicy.cilium.io
+            - k3s kubectl get CiliumLoadBalancerIPPool.cilium.io
+          changed_when: false
+          loop_control:
+            label: "{{ item }}"

roles/k3s_server_post/tasks/main.yml (new file)
@@ -0,0 +1,20 @@
+---
+- name: Deploy calico
+  include_tasks: calico.yml
+  tags: calico
+  when: calico_iface is defined and cilium_iface is not defined
+
+- name: Deploy cilium
+  include_tasks: cilium.yml
+  tags: cilium
+  when: cilium_iface is defined
+
+- name: Deploy metallb pool
+  include_tasks: metallb.yml
+  tags: metallb
+  when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
+
+- name: Remove tmp directory used for manifests
+  file:
+    path: /tmp/k3s
+    state: absent
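Given the conditions above, the CNI is picked purely from inventory variables; a hedged group_vars sketch (the interface names are examples):

    # Default: flannel, with MetalLB for LoadBalancer services
    # (leave calico_iface and cilium_iface undefined).
    # Calico:
    calico_iface: eth1
    # Cilium (wins over Calico if both are defined):
    cilium_iface: eth1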
@@ -3,25 +3,46 @@
   file:
     path: /tmp/k3s
     state: directory
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: 0755
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
+
+- name: Delete outdated metallb replicas
+  shell: |-
+    set -o pipefail
+
+    REPLICAS=$(k3s kubectl --namespace='metallb-system' get replicasets \
+      -l 'component=controller,app=metallb' \
+      -o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
+    REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
+    if [ -n "${REPLICAS_SETS}" ] ; then
+      for REPLICAS in "${REPLICAS_SETS}"
+      do
+        k3s kubectl --namespace='metallb-system' \
+          delete rs "${REPLICAS}"
+      done
+    fi
+  args:
+    executable: /bin/bash
+  changed_when: false
+  run_once: true
+  with_items: "{{ groups[group_name_master | default('master')] }}"
+
 - name: Copy metallb CRs manifest to first master
   template:
     src: "metallb.crs.j2"
     dest: "/tmp/k3s/metallb-crs.yaml"
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: 0755
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Test metallb-system namespace
   command: >-
     k3s kubectl -n metallb-system
   changed_when: false
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Wait for MetalLB resources
@@ -66,7 +87,7 @@
   command: >-
     k3s kubectl -n metallb-system get endpoints webhook-service
   changed_when: false
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Apply metallb CRs
@@ -79,16 +100,23 @@
   until: this.rc == 0
   retries: 5
 
-- name: Test metallb-system resources
+- name: Test metallb-system resources for Layer 2 configuration
   command: >-
     k3s kubectl -n metallb-system get {{ item }}
   changed_when: false
   run_once: true
+  when: metal_lb_mode == "layer2"
   with_items:
     - IPAddressPool
     - L2Advertisement
 
-- name: Remove tmp directory used for manifests
-  file:
-    path: /tmp/k3s
-    state: absent
+- name: Test metallb-system resources for BGP configuration
+  command: >-
+    k3s kubectl -n metallb-system get {{ item }}
+  changed_when: false
+  run_once: true
+  when: metal_lb_mode == "bgp"
+  with_items:
+    - IPAddressPool
+    - BGPPeer
+    - BGPAdvertisement
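The resource checks are now split by advertisement mode, all keyed off a single switch; a hedged sketch of the driving variables (names from these hunks and the template further below, values illustrative):

    metal_lb_mode: bgp               # or "layer2"
    metal_lb_bgp_my_asn: 64513       # the BGP vars are consulted only in "bgp" mode
    metal_lb_bgp_peer_asn: 64512
    metal_lb_bgp_peer_address: 192.168.30.1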
roles/k3s_server_post/templates/calico.crs.j2 (new file, 41 lines)
@@ -0,0 +1,41 @@
+# This section includes base Calico installation configuration.
+# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+  name: default
+spec:
+  # Configures Calico networking.
+  calicoNetwork:
+    # Note: The ipPools section cannot be modified post-install.
+    ipPools:
+      - blockSize: {{ calico_blockSize | default('26') }}
+        cidr: {{ cluster_cidr | default('10.52.0.0/16') }}
+        encapsulation: {{ calico_encapsulation | default('VXLANCrossSubnet') }}
+        natOutgoing: {{ calico_natOutgoing | default('Enabled') }}
+        nodeSelector: {{ calico_nodeSelector | default('all()') }}
+    nodeAddressAutodetectionV4:
+      interface: {{ calico_iface }}
+    linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}
+
+---
+
+# This section configures the Calico API server.
+# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}
+
+{% if calico_ebpf %}
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: kubernetes-services-endpoint
+  namespace: tigera-operator
+data:
+  KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
+  KUBERNETES_SERVICE_PORT: '6443'
+{% endif %}
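A hedged sketch of the variables this template consumes (names from the template itself; most values shown are the template's own default() fallbacks, the rest are illustrative):

    calico_iface: eth0                       # required; no default() in the template
    calico_ebpf: false                       # true switches linuxDataplane to BPF
    cluster_cidr: 10.52.0.0/16
    calico_blockSize: 26
    calico_encapsulation: VXLANCrossSubnet
    calico_natOutgoing: Enabled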
roles/k3s_server_post/templates/cilium.crs.j2 (new file, 29 lines)
@@ -0,0 +1,29 @@
+apiVersion: "cilium.io/v2alpha1"
+kind: CiliumBGPPeeringPolicy
+metadata:
+  name: 01-bgp-peering-policy
+spec: # CiliumBGPPeeringPolicySpec
+  virtualRouters: # []CiliumBGPVirtualRouter
+    - localASN: {{ cilium_bgp_my_asn }}
+      exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
+      neighbors: # []CiliumBGPNeighbor
+        - peerAddress: '{{ cilium_bgp_peer_address + "/32"}}'
+          peerASN: {{ cilium_bgp_peer_asn }}
+          eBGPMultihopTTL: 10
+          connectRetryTimeSeconds: 120
+          holdTimeSeconds: 90
+          keepAliveTimeSeconds: 30
+          gracefulRestart:
+            enabled: true
+            restartTimeSeconds: 120
+      serviceSelector:
+        matchExpressions:
+          - {key: somekey, operator: NotIn, values: ['never-used-value']}
+---
+apiVersion: "cilium.io/v2alpha1"
+kind: CiliumLoadBalancerIPPool
+metadata:
+  name: "01-lb-pool"
+spec:
+  cidrs:
+    - cidr: "{{ cilium_bgp_lb_cidr }}"
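The policy hard-codes the BGP timers and parameterizes only the session endpoints; a hedged group_vars sketch (names from this template, ASNs and addresses hypothetical):

    cilium_bgp_my_asn: 64513
    cilium_bgp_peer_asn: 64512
    cilium_bgp_peer_address: 192.168.30.1    # the template appends /32
    cilium_bgp_lb_cidr: 192.168.31.0/24
    # cilium_exportPodCIDR: 'true'           # optional; the template defaults to 'true'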
@@ -13,9 +13,31 @@ spec:
 {% for range in metal_lb_ip_range %}
   - {{ range }}
 {% endfor %}
+
+{% if metal_lb_mode == "layer2" %}
 ---
 apiVersion: metallb.io/v1beta1
 kind: L2Advertisement
 metadata:
   name: default
   namespace: metallb-system
+{% endif %}
+{% if metal_lb_mode == "bgp" %}
+---
+apiVersion: metallb.io/v1beta2
+kind: BGPPeer
+metadata:
+  name: default
+  namespace: metallb-system
+spec:
+  myASN: {{ metal_lb_bgp_my_asn }}
+  peerASN: {{ metal_lb_bgp_peer_asn }}
+  peerAddress: {{ metal_lb_bgp_peer_address }}
+
+---
+apiVersion: metallb.io/v1beta1
+kind: BGPAdvertisement
+metadata:
+  name: default
+  namespace: metallb-system
+{% endif %}
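Rendered with metal_lb_mode set to "bgp" and the hypothetical values sketched earlier, the new branch of this template would produce roughly:

    ---
    apiVersion: metallb.io/v1beta2
    kind: BGPPeer
    metadata:
      name: default
      namespace: metallb-system
    spec:
      myASN: 64513
      peerASN: 64512
      peerAddress: 192.168.30.1
    ---
    apiVersion: metallb.io/v1beta1
    kind: BGPAdvertisement
    metadata:
      name: default
      namespace: metallb-system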
roles/lxc/handlers/main.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+- name: Reboot server
+  become: true
+  reboot:
+    reboot_command: "{{ custom_reboot_command | default(omit) }}"
+  listen: reboot server
roles/lxc/tasks/main.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
+---
+- name: Check for rc.local file
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Create rc.local if needed
+  lineinfile:
+    path: /etc/rc.local
+    line: "#!/bin/sh -e"
+    create: true
+    insertbefore: BOF
+    mode: "u=rwx,g=rx,o=rx"
+  when: not rcfile.stat.exists
+
+- name: Write rc.local file
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    state: present
+  notify: reboot server
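Because the role writes the template through blockinfile, the resulting /etc/rc.local is wrapped in Ansible's default block markers, which is also what lets the reset play later remove exactly this block again; sketched with placeholder content:

    #!/bin/sh -e
    # BEGIN ANSIBLE MANAGED BLOCK
    ... contents of templates/rc.local.j2 ...
    # END ANSIBLE MANAGED BLOCK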
roles/prereq/defaults/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
+---
+secure_path:
+  RedHat: '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
+  Suse: '/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin'
@@ -1,34 +1,37 @@
 ---
 - name: Set same timezone on every Server
-  timezone:
+  community.general.timezone:
     name: "{{ system_timezone }}"
   when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
 
 - name: Set SELinux to disabled state
-  selinux:
+  ansible.posix.selinux:
     state: disabled
   when: ansible_os_family == "RedHat"
 
 - name: Enable IPv4 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_forward
     value: "1"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl
 
 - name: Enable IPv6 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv6.conf.all.forwarding
     value: "1"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl
 
 - name: Enable IPv6 router advertisements
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv6.conf.all.accept_ra
     value: "2"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl
 
 - name: Add br_netfilter to /etc/modules-load.d/
   copy:
@@ -38,28 +41,29 @@
   when: ansible_os_family == "RedHat"
 
 - name: Load br_netfilter
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   when: ansible_os_family == "RedHat"
 
 - name: Set bridge-nf-call-iptables (just to be sure)
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     value: "1"
     state: present
-    reload: yes
+    reload: true
   when: ansible_os_family == "RedHat"
   loop:
     - net.bridge.bridge-nf-call-iptables
     - net.bridge.bridge-nf-call-ip6tables
+  tags: sysctl
 
 - name: Add /usr/local/bin to sudo secure_path
   lineinfile:
-    line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
+    line: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'
     regexp: "Defaults(\\s)*secure_path(\\s)*="
    state: present
     insertafter: EOF
     path: /etc/sudoers
     validate: 'visudo -cf %s'
-  when: ansible_os_family == "RedHat"
+  when: ansible_os_family in [ "RedHat", "Suse" ]
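To see which secure_path a given host will receive from the new defaults dictionary, a throwaway debug task works; a minimal sketch:

    - name: Show the sudoers secure_path for this host (illustrative)
      debug:
        msg: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'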
roles/proxmox_lxc/handlers/main.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
+---
+- name: Reboot containers
+  block:
+    - name: Get container ids from filtered files
+      set_fact:
+        proxmox_lxc_filtered_ids: >-
+          {{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
+      listen: reboot containers
+    - name: Reboot container
+      command: "pct reboot {{ item }}"
+      loop: "{{ proxmox_lxc_filtered_ids }}"
+      changed_when: true
+      listen: reboot containers
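The set_fact filter chain simply strips the directory and extension from each matched config path; traced on one illustrative entry:

    # "/etc/pve/lxc/250.conf"
    #   | map("split", "/") | map("last")    ->  "250.conf"
    #   | map("split", ".") | map("first")   ->  "250"
    # so the handler ends up running: pct reboot 250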
roles/proxmox_lxc/tasks/main.yml (new file, 44 lines)
@@ -0,0 +1,44 @@
+---
+- name: Check for container files that exist on this host
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
+  loop: "{{ proxmox_lxc_ct_ids }}"
+  register: stat_results
+
+- name: Filter out files that do not exist
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
+
+# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
+- name: Ensure lxc config has the right apparmor profile
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.apparmor.profile"
+    line: "lxc.apparmor.profile: unconfined"
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right cgroup
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cgroup.devices.allow"
+    line: "lxc.cgroup.devices.allow: a"
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right cap drop
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cap.drop"
+    line: "lxc.cap.drop: "
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right mounts
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.mount.auto"
+    line: 'lxc.mount.auto: "proc:rw sys:rw"'
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
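These tasks expect the list of Proxmox container IDs to come from the inventory; a hedged sketch (variable name from the tasks above, IDs hypothetical):

    proxmox_lxc_ct_ids:
      - 200
      - 201
      - 202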
@@ -1,3 +1,5 @@
 ---
 - name: Reboot
   reboot:
+    reboot_command: "{{ custom_reboot_command | default(omit) }}"
+  listen: reboot
@@ -17,21 +17,27 @@
   when:
     grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
 
-- name: Set detected_distribution to Raspbian
+- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm)
   set_fact:
     detected_distribution: Raspbian
-  when: >
-    raspberry_pi|default(false) and
-    ( ansible_facts.lsb.id|default("") == "Raspbian" or
-    ansible_facts.lsb.description|default("") is match("[Rr]aspbian.*") )
+  vars:
+    allowed_descriptions:
+      - "[Rr]aspbian.*"
+      - "Debian.*buster"
+      - "Debian.*bullseye"
+      - "Debian.*bookworm"
+  when:
+    - ansible_facts.architecture is search("aarch64")
+    - raspberry_pi|default(false)
+    - ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))
 
-- name: Set detected_distribution to Raspbian (ARM64 on Debian Buster)
+- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm)
   set_fact:
     detected_distribution: Raspbian
   when:
     - ansible_facts.architecture is search("aarch64")
     - raspberry_pi|default(false)
-    - ansible_facts.lsb.description|default("") is match("Debian.*buster")
+    - ansible_facts.lsb.description|default("") is match("Debian.*bookworm")
 
 - name: Set detected_distribution_major_version
   set_fact:
@@ -39,28 +45,16 @@
   when:
     - detected_distribution | default("") == "Raspbian"
 
-- name: Set detected_distribution to Raspbian (ARM64 on Debian Bullseye)
-  set_fact:
-    detected_distribution: Raspbian
-  when:
-    - ansible_facts.architecture is search("aarch64")
-    - raspberry_pi|default(false)
-    - ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
-
-- name: execute OS related tasks on the Raspberry Pi - {{ action }}
+- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
   include_tasks: "{{ item }}"
   with_first_found:
-    - "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
-    - "{{ action }}/{{ detected_distribution }}.yml"
-    - "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
-    - "{{ action }}/{{ ansible_distribution }}.yml"
-    - "{{ action }}/default.yml"
+    - "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
+    - "{{ action_ }}/{{ detected_distribution }}.yml"
+    - "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
+    - "{{ action_ }}/{{ ansible_distribution }}.yml"
+    - "{{ action_ }}/default.yml"
   vars:
-    action: >-
-      {% if state == "present" -%}
-      setup
-      {%- else -%}
-      teardown
-      {%- endif %}
+    action_: >-
+      {% if state == "present" %}setup{% else %}teardown{% endif %}
   when:
     - raspberry_pi|default(false)
@@ -1,27 +1,49 @@
 ---
+- name: Test for cmdline path
+  stat:
+    path: /boot/firmware/cmdline.txt
+  register: boot_cmdline_path
+  failed_when: false
+  changed_when: false
+
+- name: Set cmdline path based on Debian version and command result
+  set_fact:
+    cmdline_path: >-
+      {{
+        (
+          boot_cmdline_path.stat.exists and
+          ansible_facts.lsb.description | default('') is match('Debian.*(?!(bookworm|sid))')
+        ) | ternary(
+          '/boot/firmware/cmdline.txt',
+          '/boot/cmdline.txt'
+        )
+      }}
+
 - name: Activating cgroup support
   lineinfile:
-    path: /boot/cmdline.txt
+    path: "{{ cmdline_path }}"
     regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
     line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
     backrefs: true
   notify: reboot
 
 - name: Install iptables
-  apt: name=iptables state=present
+  apt:
+    name: iptables
+    state: present
 
 - name: Flush iptables before changing to iptables-legacy
   iptables:
     flush: true
 
 - name: Changing to iptables-legacy
-  alternatives:
+  community.general.alternatives:
     path: /usr/sbin/iptables-legacy
     name: iptables
   register: ip4_legacy
 
 - name: Changing to ip6tables-legacy
-  alternatives:
+  community.general.alternatives:
     path: /usr/sbin/ip6tables-legacy
     name: ip6tables
   register: ip6_legacy
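Which of the two cmdline locations the ternary selected is easy to confirm before the lineinfile edit runs; a minimal throwaway check:

    - name: Show which cmdline file will be edited (illustrative)
      debug:
        var: cmdline_path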
@@ -2,7 +2,7 @@
 - name: Enable cgroup via boot commandline if not already enabled for Rocky
   lineinfile:
     path: /boot/cmdline.txt
-    backrefs: yes
+    backrefs: true
     regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
     line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
   notify: reboot
@@ -2,7 +2,7 @@
 - name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi
   lineinfile:
     path: /boot/firmware/cmdline.txt
-    backrefs: yes
+    backrefs: true
     regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
     line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
   notify: reboot
@@ -3,7 +3,7 @@
   systemd:
     name: "{{ item }}"
     state: stopped
-    enabled: no
+    enabled: false
   failed_when: false
   with_items:
     - k3s
@@ -45,12 +45,52 @@
     - /var/lib/rancher/k3s
     - /var/lib/rancher/
     - /var/lib/cni/
+    - /etc/cni/net.d
+
+- name: Remove K3s http_proxy files
+  file:
+    name: "{{ item }}"
+    state: absent
+  with_items:
+    - "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
+    - "{{ systemd_dir }}/k3s.service.d"
+    - "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
+    - "{{ systemd_dir }}/k3s-node.service.d"
+  when: proxy_env is defined
+
 - name: Reload daemon_reload
   systemd:
-    daemon_reload: yes
+    daemon_reload: true
 
 - name: Remove tmp directory used for manifests
   file:
     path: /tmp/k3s
     state: absent
+
+- name: Check if rc.local exists
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Remove rc.local modifications for proxmox lxc containers
+  become: true
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    create: false
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Check rc.local for cleanup
+  become: true
+  slurp:
+    src: /etc/rc.local
+  register: rcslurp
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Cleanup rc.local if we only have a Shebang line
+  become: true
+  file:
+    path: /etc/rc.local
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1
@@ -9,7 +9,7 @@
   check_mode: false
 
 - name: Umount filesystem
-  mount:
+  ansible.posix.mount:
     path: "{{ item }}"
     state: unmounted
   with_items:
roles/reset_proxmox_lxc/handlers/main.yml (new symbolic link)
@@ -0,0 +1 @@
+../../proxmox_lxc/handlers/main.yml
roles/reset_proxmox_lxc/tasks/main.yml (new file, 47 lines)
@@ -0,0 +1,47 @@
+---
+- name: Check for container files that exist on this host
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
+  loop: "{{ proxmox_lxc_ct_ids }}"
+  register: stat_results
+
+- name: Filter out files that do not exist
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
+
+- name: Remove LXC apparmor profile
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.apparmor.profile"
+    line: "lxc.apparmor.profile: unconfined"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cgroups
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cgroup.devices.allow"
+    line: "lxc.cgroup.devices.allow: a"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cap drop
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cap.drop"
+    line: "lxc.cap.drop: "
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc mounts
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.mount.auto"
+    line: 'lxc.mount.auto: "proc:rw sys:rw"'
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
site.yml (68 changed lines)
@@ -1,24 +1,68 @@
 ---
+- name: Pre tasks
+  hosts: all
+  pre_tasks:
+    - name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible)
+      assert:
+        that: "ansible_version.full is version_compare('2.11', '>=')"
+        msg: >
+          "Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/"
+
-- hosts: k3s_cluster
-  gather_facts: yes
-  become: yes
+- name: Prepare Proxmox cluster
+  hosts: proxmox
+  gather_facts: true
+  become: true
+  environment: "{{ proxy_env | default({}) }}"
   roles:
+    - role: proxmox_lxc
+      when: proxmox_lxc_configure
+
+- name: Prepare k3s nodes
+  hosts: k3s_cluster
+  gather_facts: true
+  environment: "{{ proxy_env | default({}) }}"
+  roles:
+    - role: lxc
+      become: true
+      when: proxmox_lxc_configure
     - role: prereq
+      become: true
     - role: download
+      become: true
     - role: raspberrypi
+      become: true
+    - role: k3s_custom_registries
+      become: true
+      when: custom_registries
 
-- hosts: master
-  become: yes
+- name: Setup k3s servers
+  hosts: master
+  environment: "{{ proxy_env | default({}) }}"
   roles:
-    - role: k3s/master
+    - role: k3s_server
+      become: true
 
-- hosts: node
-  become: yes
+- name: Setup k3s agents
+  hosts: node
+  environment: "{{ proxy_env | default({}) }}"
   roles:
-    - role: k3s/node
+    - role: k3s_agent
+      become: true
 
-- hosts: master
-  become: yes
+- name: Configure k3s cluster
+  hosts: master
+  environment: "{{ proxy_env | default({}) }}"
   roles:
-    - role: k3s/post
+    - role: k3s_server_post
+      become: true
+
+- name: Storing kubeconfig in the playbook directory
+  hosts: master
+  environment: "{{ proxy_env | default({}) }}"
+  tasks:
+    - name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
+      ansible.builtin.fetch:
+        src: "{{ ansible_user_dir }}/.kube/config"
+        dest: ./kubeconfig
+        flat: true
+      when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
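The renamed plays assume an inventory with a proxmox group alongside the existing k3s groups; a hedged sketch in YAML inventory form (host names and addresses hypothetical, group names from site.yml):

    all:
      children:
        proxmox:
          hosts:
            pve1:
        k3s_cluster:
          children:
            master:
              hosts:
                192.168.30.38:
            node:
              hosts:
                192.168.30.41: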
templates/rc.local.j2 (new file, 8 lines)
@@ -0,0 +1,8 @@
+# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
+# see: https://github.com/kubernetes-sigs/kind/issues/662
+if [ ! -e /dev/kmsg ]; then
+    ln -s /dev/console /dev/kmsg
+fi
+
+# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c
+mount --make-rshared /