Mirror of https://github.com/techno-tim/k3s-ansible.git (synced 2025-12-25 10:12:38 +01:00)

Comparing v1.23.4+k3s1 ... v1.24.11+k3s1 (98 commits)
.ansible-lint
@@ -1,3 +1,17 @@
 ---
+exclude_paths:
+  # default paths
+  - '.cache/'
+  - '.github/'
+  - 'test/fixtures/formatting-before/'
+  - 'test/fixtures/formatting-prettier/'
+
+  # The "converge" and "reset" playbooks use import_playbook in
+  # conjunction with the "env" lookup plugin, which makes the
+  # syntax check of ansible-lint fail.
+  - 'molecule/**/converge.yml'
+  - 'molecule/**/prepare.yml'
+  - 'molecule/**/reset.yml'
+
 skip_list:
   - 'fqcn-builtins'
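With these exclusions in place, a plain invocation from the repository root lints everything except the listed paths; a minimal local sketch (nothing beyond ansible-lint itself assumed, version floor taken from requirements.in):

```bash
python3 -m pip install "ansible-lint>=6.8.6"
ansible-lint   # picks up .ansible-lint from the current directory automatically
```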
13 .editorconfig (new file)
@@ -0,0 +1,13 @@
root = true
[*]
indent_style = space
indent_size = 2
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
end_of_line = lf
max_line_length = off
[Makefile]
indent_style = tab
[*.go]
indent_style = tab
6 .github/ISSUE_TEMPLATE.md
@@ -26,7 +26,7 @@ Operating system:
 
 Hardware:
 
-### Variables Used:
+### Variables Used
 
 `all.yml`
 
@@ -52,7 +52,7 @@ metal_lb_controller_tag_version: ""
 metal_lb_ip_range: ""
 ```
 
 ### Hosts
 
 `host.ini`
 
@@ -73,3 +73,5 @@ node
 
 ## Possible Solution
 <!--- Not obligatory, but suggest a fix/reason for the bug, -->
+
+- [ ] I've checked the [General Troubleshooting Guide](https://github.com/techno-tim/k3s-ansible/discussions/20)
1 .github/PULL_REQUEST_TEMPLATE.md
@@ -11,4 +11,5 @@
 - [ ] Ran `site.yml` playbook
 - [ ] Ran `reset.yml` playbook
 - [ ] Did not add any unnecessary changes
+- [ ] Ran pre-commit install at least once before committing
 - [ ] 🚀
11 .github/dependabot.yml (new file)
@@ -0,0 +1,11 @@
---
version: 2
updates:
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "daily"
    rebase-strategy: "auto"
    ignore:
      - dependency-name: "*"
        update-types: ["version-update:semver-major"]
37 .github/download-boxes.sh (new executable file)
@@ -0,0 +1,37 @@
#!/bin/bash

# download-boxes.sh
# Check all molecule.yml files for required Vagrant boxes and download the ones that are not
# already present on the system.

set -euo pipefail

GIT_ROOT=$(git rev-parse --show-toplevel)
PROVIDER=virtualbox

# Read all boxes for all platforms from the "molecule.yml" files
all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
  yq -r '.platforms[].box' |           # Read the "box" property of each node under "platforms"
  grep --invert-match --regexp=--- |   # Filter out file separators
  sort |
  uniq)

# Read the boxes that are currently present on the system (for the current provider)
present_boxes=$(
  (vagrant box list |
    grep "${PROVIDER}" |    # Filter by boxes available for the current provider
    awk '{print $1;}' |     # The box name is the first word in each line
    sort |
    uniq) ||
    echo ""  # In case any of these commands fails, just use an empty list
)

# The boxes that we need to download are the ones present in $all_boxes, but not $present_boxes.
download_boxes=$(comm -2 -3 <(echo "${all_boxes}") <(echo "${present_boxes}"))

# Actually download the necessary boxes
if [ -n "${download_boxes}" ]; then
  echo "${download_boxes}" | while IFS= read -r box; do
    vagrant box add --provider "${PROVIDER}" "${box}"
  done
fi
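The set difference that decides what to download is computed by `comm -2 -3`; a quick standalone illustration of that behaviour (toy box names, sorted input as `comm` requires):

```bash
# -2 suppresses lines only in the second list, -3 suppresses lines common
# to both, leaving exactly the lines that appear only in the first list.
comm -2 -3 <(printf 'box-a\nbox-b\nbox-c\n') <(printf 'box-b\n')
# prints:
# box-a
# box-c
```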
15 .github/workflows/ci.yml (new file)
@@ -0,0 +1,15 @@
---
name: "CI"
on:
  pull_request:
  push:
    branches:
      - master
    paths-ignore:
      - '**/README.md'
jobs:
  lint:
    uses: ./.github/workflows/lint.yml
  test:
    uses: ./.github/workflows/test.yml
    needs: [lint]
81 .github/workflows/lint.yml
@@ -1,31 +1,68 @@
 ---
-name: Lint
+name: Linting
-'on':
-  pull_request:
-  push:
-    branches:
-      - master
+on:
+  workflow_call:
 
 jobs:
-  test:
-    name: Lint
+  pre-commit-ci:
+    name: Pre-Commit
     runs-on: ubuntu-latest
+    env:
+      PYTHON_VERSION: "3.10"
 
     steps:
-      - name: Check out the codebase.
-        uses: actions/checkout@v2
+      - name: Check out the codebase
+        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
 
-      - name: Set up Python 3.7.
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.x'
+      - name: Set up Python ${{ env.PYTHON_VERSION }}
+        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+          cache: 'pip'  # caching pip dependencies
 
-      - name: Install test dependencies.
-        run: pip3 install yamllint ansible-lint ansible
+      - name: Cache pip
+        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
 
-      - name: Run yamllint.
-        run: yamllint .
+      - name: Cache Ansible
+        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
+        with:
+          path: ~/.ansible/collections
+          key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-ansible-
 
-      - name: Run ansible-lint.
-        run: ansible-lint
+      - name: Install dependencies
+        run: |
+          echo "::group::Upgrade pip"
+          python3 -m pip install --upgrade pip
+          echo "::endgroup::"
+
+          echo "::group::Install Python requirements from requirements.txt"
+          python3 -m pip install -r requirements.txt
+          echo "::endgroup::"
+
+          echo "::group::Install Ansible role requirements from collections/requirements.yml"
+          ansible-galaxy install -r collections/requirements.yml
+          echo "::endgroup::"
+
+      - name: Run pre-commit
+        uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
+
+  ensure-pinned-actions:
+    name: Ensure SHA Pinned Actions
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+      - name: Ensure SHA pinned actions
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
+        with:
+          allowlist: |
+            aws-actions/
+            docker/login-action
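Since the lint job is essentially a pre-commit wrapper, it can be reproduced locally; a sketch assuming the repository's requirements files (same commands as the workflow's install step):

```bash
python3 -m pip install -r requirements.txt
ansible-galaxy install -r collections/requirements.yml
pre-commit run --all-files   # roughly what pre-commit/action executes in CI
```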
92 .github/workflows/test.yml (new file)
@@ -0,0 +1,92 @@
---
name: Test
on:
  workflow_call:
jobs:
  molecule:
    name: Molecule
    runs-on: macos-12
    strategy:
      matrix:
        scenario:
          - default
          - ipv6
          - single_node
      fail-fast: false
    env:
      PYTHON_VERSION: "3.10"

    steps:
      - name: Check out the codebase
        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Configure VirtualBox
        run: |-
          sudo mkdir -p /etc/vbox
          cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
          * 192.168.30.0/24
          * fdad:bad:ba55::/64
          EOF

      - name: Cache pip
        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Cache Vagrant boxes
        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
        with:
          path: |
            ~/.vagrant.d/boxes
          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
          restore-keys: |
            vagrant-boxes

      - name: Download Vagrant boxes for all scenarios
        # To save some cache space, all scenarios share the same cache key.
        # On the other hand, this means that the cache contents should be
        # the same across all scenarios. This step ensures that.
        run: ./.github/download-boxes.sh

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip'  # caching pip dependencies

      - name: Install dependencies
        run: |
          echo "::group::Upgrade pip"
          python3 -m pip install --upgrade pip
          echo "::endgroup::"

          echo "::group::Install Python requirements from requirements.txt"
          python3 -m pip install -r requirements.txt
          echo "::endgroup::"

      - name: Test with molecule
        run: molecule test --scenario-name ${{ matrix.scenario }}
        timeout-minutes: 90
        env:
          ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
          ANSIBLE_SSH_RETRIES: 4
          ANSIBLE_TIMEOUT: 60
          PY_COLORS: 1
          ANSIBLE_FORCE_COLOR: 1

      - name: Upload log files
        if: always()  # do this even if a step before has failed
        uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
        with:
          name: logs
          path: |
            ${{ runner.temp }}/logs

      - name: Delete old box versions
        if: always()  # do this even if a step before has failed
        run: vagrant box prune --force
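A single matrix entry can be exercised locally with the same knobs the workflow sets; a hedged sketch (the log directory is an arbitrary local choice, not dictated by the workflow):

```bash
export ANSIBLE_K3S_LOG_DIR="$PWD/.logs/ipv6"    # arbitrary local path
export ANSIBLE_SSH_RETRIES=4 ANSIBLE_TIMEOUT=60
molecule test --scenario-name ipv6
```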
4 .gitignore
@@ -1 +1,3 @@
-.vagrant
+.env/
+*.log
+ansible.cfg
35 .pre-commit-config.yaml (new file)
@@ -0,0 +1,35 @@
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0  # v4.3.0
    hooks:
      - id: requirements-txt-fixer
      - id: sort-simple-yaml
      - id: detect-private-key
      - id: check-merge-conflict
      - id: end-of-file-fixer
      - id: mixed-line-ending
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
  - repo: https://github.com/adrienverge/yamllint.git
    rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2  # v1.28.0
    hooks:
      - id: yamllint
        args: [-c=.yamllint]
  - repo: https://github.com/ansible-community/ansible-lint.git
    rev: a058554b9bcf88f12ad09ab9fb93b267a214368f  # v6.8.6
    hooks:
      - id: ansible-lint
  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a  # v0.8.0.4
    hooks:
      - id: shellcheck
  - repo: https://github.com/Lucas-C/pre-commit-hooks
    rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9  # v1.3.1
    hooks:
      - id: remove-crlf
      - id: remove-tabs
  - repo: https://github.com/sirosen/texthooks
    rev: 30d9af95631de0d7cff4e282bde9160d38bb0359  # 0.4.0
    hooks:
      - id: fix-smartquotes
2 LICENSE
@@ -174,4 +174,4 @@
   incurred by, or claims asserted against, such Contributor by reason
   of your accepting any such warranty or additional liability.
 
 END OF TERMS AND CONDITIONS
35 README.md
@@ -10,15 +10,15 @@ If you want more context on how this works, see:
 
 📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
 
-📺 [Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
+📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
 
 ## 📖 k3s Ansible Playbook
 
 Build a Kubernetes cluster using Ansible with k3s. The goal is to easily install a HA Kubernetes cluster on machines running:
 
-- [X] Debian
+- [x] Debian (tested on version 11)
-- [X] Ubuntu
+- [x] Ubuntu (tested on version 22.04)
-- [X] CentOS
+- [x] Rocky (tested on version 9)
 
 on processor architecture:
 
@@ -28,7 +28,12 @@ on processor architecture:
 
 ## ✅ System requirements
 
-- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs on setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- Control Node (the machine you run `ansible` commands from) must have Ansible 2.11+. If you need a quick primer on Ansible [you can check out my docs on setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+
+- You will also need to install the collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
+
+- [`netaddr` package](https://pypi.org/project/netaddr/) must be available to Ansible. If you have installed Ansible via apt, this is already taken care of. If you have installed Ansible via `pip`, make sure to install `netaddr` into the respective virtual environment.
 
 - `server` and `agent` nodes should have passwordless SSH access; if not, you can supply arguments to provide credentials `--ask-pass --ask-become-pass` to each command.
 
 ## 🚀 Getting Started
 
@@ -62,6 +67,8 @@ node
 
 If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).
 
+Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
+
 This requires at least k3s version `1.19.1`; however, the version is configurable by using the `k3s_version` variable.
 
 If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.
 
@@ -100,18 +107,20 @@ See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#test
 
 Be sure to see [this post](https://github.com/techno-tim/k3s-ansible/discussions/20) on how to troubleshoot common problems
 
-### 🔷 Vagrant
+### Testing the playbook using molecule
 
-You may want to kickstart your k3s cluster by using Vagrant to quickly build all needed VMs with one command.
-Head to the `vagrant` subfolder and type `vagrant up` to get your environment set up.
-After the VMs have been built, deploy k3s using the Ansible playbook `site.yml` via the
-`vagrant provision --provision-with ansible` command.
+This playbook includes a [molecule](https://molecule.rtfd.io/)-based test setup.
+It is run automatically in CI, but you can also run the tests locally.
+This might be helpful for quick feedback in a few cases.
+You can find more information about it [here](molecule/README.md).
 
+### Pre-commit Hooks
+
+This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install the Python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
+
 ## Thanks 🤝
 
-This repo is really standing on the shoulders of giants. To all those who have contributed.
+This repo is really standing on the shoulders of giants. Thank you to all those who have contributed, and thanks to these repos for code and ideas:
 
-Thanks to these repos for code and ideas:
-
 - [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible)
 - [geerlingguy/turing-pi-cluster](https://github.com/geerlingguy/turing-pi-cluster)
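Pieced together, the updated README implies a bootstrap sequence roughly like the following (the `inventory/my-cluster` layout is this project's documented convention; adjust paths to your setup):

```bash
ansible-galaxy collection install -r ./collections/requirements.yml
cp -R inventory/sample inventory/my-cluster   # then edit hosts.ini and group_vars/all.yml
cp ansible.example.cfg ansible.cfg            # and point `inventory` at your hosts.ini
ansible-playbook site.yml
```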
12 ansible.cfg (deleted)
@@ -1,12 +0,0 @@
-[defaults]
-nocows = True
-roles_path = ./roles
-inventory = ./hosts.ini
-
-remote_tmp = $HOME/.ansible/tmp
-local_tmp = $HOME/.ansible/tmp
-pipelining = True
-become = True
-host_key_checking = False
-deprecation_warnings = False
-callback_whitelist = profile_tasks
2 ansible.example.cfg (new file)
@@ -0,0 +1,2 @@
[defaults]
inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file
collections/requirements.yml
@@ -1,3 +1,6 @@
 ---
 collections:
+  - name: ansible.utils
   - name: community.general
+  - name: ansible.posix
+  - name: kubernetes.core
deploy.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook site.yml
example/service.yml
@@ -4,6 +4,7 @@ kind: Service
 metadata:
   name: nginx
 spec:
+  ipFamilyPolicy: PreferDualStack
   selector:
     app: nginx
   ports:
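To see whether the `PreferDualStack` policy actually took effect, the assigned IP families can be inspected; a sketch (standard Kubernetes Service fields; namespace and output values are illustrative):

```bash
kubectl get service nginx \
  -o jsonpath='{.spec.ipFamilyPolicy} {.spec.ipFamilies} {.spec.clusterIPs}{"\n"}'
# On a dual-stack cluster, expect something like:
# PreferDualStack ["IPv4","IPv6"] ["10.43.x.x","fdad:bad:ba55:4300::x"]
```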
4 inventory/.gitignore
@@ -1,3 +1,3 @@
-*
+/*
 !.gitignore
 !sample/
inventory/sample/group_vars/all.yml
@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.23.4+k3s1
+k3s_version: v1.24.11+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
@@ -17,16 +17,68 @@ apiserver_endpoint: "192.168.30.222"
 # this token should be alpha numeric only
 k3s_token: "some-SUPER-DEDEUPER-secret-password"
 
-# change these to your liking, the only required one is --no-deploy servicelb
-extra_server_args: "--no-deploy servicelb --no-deploy traefik"
-extra_agent_args: ""
+# The IP on which the node is reachable in the cluster.
+# Here, a sensible default is provided; you can still override
+# it for each of your hosts, though.
+k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
+
+# Disable the taint manually by setting: k3s_master_taint = false
+k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
+
+# these arguments are recommended for servers as well as agents:
+extra_args: >-
+  --flannel-iface={{ flannel_iface }}
+  --node-ip={{ k3s_node_ip }}
+
+# change these to your liking; the only required ones are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
+extra_server_args: >-
+  {{ extra_args }}
+  {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
+  --tls-san {{ apiserver_endpoint }}
+  --disable servicelb
+  --disable traefik
+extra_agent_args: >-
+  {{ extra_args }}
 
 # image tag for kube-vip
-kube_vip_tag_version: "v0.4.4"
+kube_vip_tag_version: "v0.5.11"
+
+# metallb type frr or native
+metal_lb_type: "native"
+
+# metallb mode layer2 or bgp
+metal_lb_mode: "layer2"
+
+# bgp options
+# metal_lb_bgp_my_asn: "64513"
+# metal_lb_bgp_peer_asn: "64512"
+# metal_lb_bgp_peer_address: "192.168.30.1"
 
 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.12.1"
-metal_lb_controller_tag_version: "v0.12.1"
+metal_lb_frr_tag_version: "v7.5.1"
+metal_lb_speaker_tag_version: "v0.13.9"
+metal_lb_controller_tag_version: "v0.13.9"
 
 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"
+
+# Only enable if your nodes are proxmox LXC nodes; make sure to configure your proxmox nodes
+# in your hosts.ini file.
+# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
+# Most notably, your containers must be privileged, and must not have nesting set to true.
+# Please note this script disables most of the security of lxc containers, with the trade-off being that lxc
+# containers are significantly more resource efficient compared to full VMs.
+# Mixing and matching VMs and lxc containers is not supported; ymmv if you want to do this.
+# I would only really recommend using this if you have particularly low-powered proxmox nodes where the overhead of
+# VMs would use a significant portion of your available resources.
+proxmox_lxc_configure: false
+# the user that you would use to ssh into the host; for example if you run ssh some-user@my-proxmox-host,
+# set this value to some-user
+proxmox_lxc_ssh_user: root
+# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes
+proxmox_lxc_ct_ids:
+  - 200
+  - 201
+  - 202
+  - 203
+  - 204
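Because `extra_args` and `extra_server_args` are folded block scalars (`>-`), the multi-line values collapse into one space-separated flag string. Roughly what the first server ends up being started with, for a cluster that has agent nodes (interface and node IP below are illustrative, not from the file):

```bash
# Hypothetical rendering of extra_server_args on the first control node:
k3s server \
  --flannel-iface=eth0 --node-ip=192.168.30.38 \
  --node-taint node-role.kubernetes.io/master=true:NoSchedule \
  --tls-san 192.168.30.222 --disable servicelb --disable traefik
```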
inventory/sample/hosts.ini
@@ -7,6 +7,11 @@
 192.168.30.41
 192.168.30.42
 
+# only required if proxmox_lxc_configure: true
+# must contain all proxmox instances that have a master or worker node
+# [proxmox]
+# 192.168.30.43
+
 [k3s_cluster:children]
 master
 node
73 molecule/README.md (new file)
@@ -0,0 +1,73 @@
# Test suites for `k3s-ansible`

This folder contains the [molecule](https://molecule.rtfd.io/)-based test setup for this playbook.

## Scenarios

We have these scenarios:

- **default**:
  A 3 control + 2 worker node cluster based very closely on the [sample inventory](../inventory/sample/).
- **ipv6**:
  A cluster that is externally accessible via IPv6 ([more information](ipv6/README.md)).
  To save a bit of test time, this cluster is _not_ highly available; it consists of only one control and one worker node.
- **single_node**:
  Very similar to the default scenario, but uses only a single node for all cluster functionality.

## How to execute

To test on your local machine, follow these steps:

### System requirements

Make sure that the following software packages are available on your system:

- [Python 3](https://www.python.org/downloads)
- [Vagrant](https://www.vagrantup.com/downloads)
- [VirtualBox](https://www.virtualbox.org/wiki/Downloads)

### Set up VirtualBox networking on Linux and macOS

_You can safely skip this if you are working on Windows._

The test cluster uses the `192.168.30.0/24` subnet, which is [not set up by VirtualBox automatically](https://www.virtualbox.org/manual/ch06.html#network_hostonly).
To set the subnet up for use with VirtualBox, please make sure that `/etc/vbox/networks.conf` exists and that it contains these lines:

```
* 192.168.30.0/24
* fdad:bad:ba55::/64
```

### Install Python dependencies

You will get [Molecule, Ansible and a few extra dependencies](../requirements.txt) via [pip](https://pip.pypa.io/).
Usually, it is advisable to work in a [virtual environment](https://docs.python.org/3/tutorial/venv.html) for this:

```bash
cd /path/to/k3s-ansible

# Create a virtualenv at ".env". You only need to do this once.
python3 -m venv .env

# Activate the virtualenv for your current shell session.
# If you start a new session, you will have to repeat this.
source .env/bin/activate

# Install the required packages into the virtualenv.
# These remain installed across shell sessions.
python3 -m pip install -r requirements.txt
```

### Run molecule

With the virtual environment from the previous step active in your shell session, you can now use molecule to test the playbook.
Interesting commands are:

- `molecule create`: Create virtual machines for the test cluster nodes.
- `molecule destroy`: Delete the virtual machines for the test cluster nodes.
- `molecule converge`: Run the `site` playbook on the nodes of the test cluster.
- `molecule side_effect`: Run the `reset` playbook on the nodes of the test cluster.
- `molecule verify`: Verify that the cluster works correctly.
- `molecule test`: The "all-in-one" sequence of steps that is executed in CI.
  This includes the `create`, `converge`, `verify`, `side_effect` and `destroy` steps.
  See [`molecule.yml`](default/molecule.yml) for more details.
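For iterating on the playbook itself, the all-in-one `molecule test` is slow; a shorter loop using the commands listed in that README:

```bash
molecule create --scenario-name default     # once
molecule converge --scenario-name default   # repeat after each playbook change
molecule verify --scenario-name default
molecule destroy --scenario-name default    # when done
```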
99 molecule/default/molecule.yml (new file)
@@ -0,0 +1,99 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:

  - name: control1
    box: generic/ubuntu2204
    memory: 2048
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.38
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"

  - name: control2
    box: generic/debian11
    memory: 2048
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.39

  - name: control3
    box: generic/rocky9
    memory: 2048
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.40

  - name: node1
    box: generic/ubuntu2204
    memory: 2048
    cpus: 2
    groups:
      - k3s_cluster
      - node
    interfaces:
      - network_name: private_network
        ip: 192.168.30.41
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"

  - name: node2
    box: generic/rocky9
    memory: 2048
    cpus: 2
    groups:
      - k3s_cluster
      - node
    interfaces:
      - network_name: private_network
        ip: 192.168.30.42

provisioner:
  name: ansible
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - lint
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
11 molecule/default/overrides.yml (new file)
@@ -0,0 +1,11 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45
22 molecule/default/prepare.yml (new file)
@@ -0,0 +1,22 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml

- name: Network setup
  hosts: all
  tasks:
    - name: Disable firewalld
      when: ansible_distribution == "Rocky"
      # Rocky Linux comes with firewalld enabled. It blocks some of the network
      # connections needed for our k3s cluster. For our test setup, we just disable
      # it since the VM host's firewall is still active for connections to and from
      # the Internet.
      # When building your own cluster, please DO NOT blindly copy this. Instead,
      # please create a custom firewall configuration that fits your network design
      # and security needs.
      ansible.builtin.systemd:
        name: firewalld
        enabled: no
        state: stopped
      become: true
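The comment above asks you not to copy the blanket disable; for a real Rocky node, opening only what k3s needs is the usual alternative. A hedged sketch (port list per the upstream k3s requirements docs, not from this repo):

```bash
sudo firewall-cmd --permanent --add-port=6443/tcp    # Kubernetes API server
sudo firewall-cmd --permanent --add-port=8472/udp    # flannel VXLAN between nodes
sudo firewall-cmd --permanent --add-port=10250/tcp   # kubelet metrics
sudo firewall-cmd --reload
```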
35 molecule/ipv6/README.md (new file)
@@ -0,0 +1,35 @@
# Sample IPv6 configuration for `k3s-ansible`

This scenario contains a cluster configuration which is _IPv6 first_, but still supports dual-stack networking with IPv4 for most things.
This means:

- The API server VIP is an IPv6 address.
- The MetalLB pool consists of both IPv4 and IPv6 addresses.
- Nodes as well as cluster-internal resources (pods and services) are accessible via IPv4 as well as IPv6.

## Network design

All IPv6 addresses used in this scenario share a single `/48` prefix: `fdad:bad:ba55`.
The following subnets are used:

- `fdad:bad:ba55:`**`0`**`::/64` is the subnet which contains the cluster components meant for external access.
  That includes:

  - The VIP for the Kubernetes API server: `fdad:bad:ba55::333`
  - Services load-balanced by MetalLB: `fdad:bad:ba55::1b:0/112`
  - Cluster nodes: `fdad:bad:ba55::de:0/112`
  - The host executing Vagrant: `fdad:bad:ba55::1`

  In a home lab setup, this might be your LAN.

- `fdad:bad:ba55:`**`4200`**`::/56` is used internally by the cluster for pods.

- `fdad:bad:ba55:`**`4300`**`::/108` is used internally by the cluster for services.

IPv4 networking is also available:

- The nodes have addresses inside `192.168.123.0/24`.
  MetalLB also has a bit of address space in this range: `192.168.123.80-192.168.123.90`.
- For pods and services, the k3s defaults (`10.42.0.0/16` and `10.43.0.0/16`) are used.

Note that the host running Vagrant is not part of any of these IPv4 networks.
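With the VMs up, the addressing plan can be sanity-checked from the Vagrant host; a sketch (expects ICMPv6 replies from the node, and any TLS answer from the VIP, even an unauthorized one, proves reachability):

```bash
ping -6 -c 3 fdad:bad:ba55::de:11                     # first control node
curl -ks "https://[fdad:bad:ba55::333]:6443/version"  # API VIP; IPv6 literals need brackets in URLs
```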
3 molecule/ipv6/host_vars/control1.yml (new file)
@@ -0,0 +1,3 @@
---
node_ipv4: 192.168.123.11
node_ipv6: fdad:bad:ba55::de:11

3 molecule/ipv6/host_vars/control2.yml (new file)
@@ -0,0 +1,3 @@
---
node_ipv4: 192.168.123.12
node_ipv6: fdad:bad:ba55::de:12

3 molecule/ipv6/host_vars/node1.yml (new file)
@@ -0,0 +1,3 @@
---
node_ipv4: 192.168.123.21
node_ipv6: fdad:bad:ba55::de:21
80 molecule/ipv6/molecule.yml (new file)
@@ -0,0 +1,80 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 2048
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: fdad:bad:ba55::de:11
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"

  - name: control2
    box: generic/ubuntu2204
    memory: 2048
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: fdad:bad:ba55::de:12
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"

  - name: node1
    box: generic/ubuntu2204
    memory: 2048
    cpus: 2
    groups:
      - k3s_cluster
      - node
    interfaces:
      - network_name: private_network
        ip: fdad:bad:ba55::de:21
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"
provisioner:
  name: ansible
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - lint
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
50 molecule/ipv6/overrides.yml (new file)
@@ -0,0 +1,50 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables (1/2)
      ansible.builtin.set_fact:
        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
        flannel_iface: eth1

        # In this scenario, we have multiple interfaces that the VIP could be
        # broadcasted on. Since we have assigned a dedicated private network
        # here, let's make sure that it is used.
        kube_vip_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # IPv6 configuration
        # ######################################################################

        # The API server will be reachable on IPv6 only
        apiserver_endpoint: fdad:bad:ba55::333

        # We give MetalLB address space for both IPv4 and IPv6
        metal_lb_ip_range:
          - fdad:bad:ba55::1b:0/112
          - 192.168.123.80-192.168.123.90

        # k3s_node_ip is by default set to the IPv4 address of flannel_iface.
        # We want IPv6 addresses here of course, so we just specify them
        # manually below.
        k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}"

    - name: Override host variables (2/2)
      # Since "extra_args" depends on "k3s_node_ip" and "flannel_iface", we have
      # to set this AFTER overriding both of them.
      ansible.builtin.set_fact:
        # A few extra server args are necessary:
        # - the network policy needs to be disabled.
        # - we need to manually specify the subnets for services and pods, as
        #   the default has IPv4 ranges only.
        extra_server_args: >-
          {{ extra_args }}
          --tls-san {{ apiserver_endpoint }}
          {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
          --disable servicelb
          --disable traefik
          --disable-network-policy
          --cluster-cidr=10.42.0.0/16,fdad:bad:ba55:4200::/56
          --service-cidr=10.43.0.0/16,fdad:bad:ba55:4300::/108
51 molecule/ipv6/prepare.yml (new file)
@@ -0,0 +1,51 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml

- name: Configure dual-stack networking
  hosts: all
  become: true

  # Unfortunately, as of 2022-09, Vagrant does not support the configuration
  # of both IPv4 and IPv6 addresses for a single network adapter. So we have
  # to configure that ourselves.
  # Moreover, we have to explicitly enable IPv6 for the loopback interface.

  tasks:
    - name: Enable IPv6 for network interfaces
      ansible.posix.sysctl:
        name: net.ipv6.conf.{{ item }}.disable_ipv6
        value: "0"
      with_items:
        - all
        - default
        - lo

    - name: Disable duplicate address detection
      # Duplicate address detection did repeatedly fail within the virtual
      # network. But since this setup does not use SLAAC anyway, we can safely
      # disable it.
      ansible.posix.sysctl:
        name: net.ipv6.conf.{{ item }}.accept_dad
        value: "0"
      with_items:
        - "{{ flannel_iface }}"

    - name: Write IPv4 configuration
      ansible.builtin.template:
        src: 55-flannel-ipv4.yaml.j2
        dest: /etc/netplan/55-flannel-ipv4.yaml
        owner: root
        group: root
        mode: 0644
      register: netplan_template

    - name: Apply netplan configuration
      # Conceptually, this should be a handler rather than a task.
      # However, we are currently not in a role context - creating
      # one just for this seemed overkill.
      when: netplan_template.changed
      ansible.builtin.command:
        cmd: netplan apply
      changed_when: true
8 molecule/ipv6/templates/55-flannel-ipv4.yaml.j2 (new file)
@@ -0,0 +1,8 @@
---
network:
  version: 2
  renderer: networkd
  ethernets:
    {{ flannel_iface }}:
      addresses:
        - {{ node_ipv4 }}/24
7 molecule/resources/converge.yml (new file)
@@ -0,0 +1,7 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml

- name: Converge
  ansible.builtin.import_playbook: ../../site.yml

7 molecule/resources/reset.yml (new file)
@@ -0,0 +1,7 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml

- name: Reset
  ansible.builtin.import_playbook: ../../reset.yml

5 molecule/resources/verify.yml (new file)
@@ -0,0 +1,5 @@
---
- name: Verify
  hosts: all
  roles:
    - verify/from_outside
9 molecule/resources/verify/from_outside/defaults/main.yml (new file)
@@ -0,0 +1,9 @@
---
# A host outside of the cluster from which the checks shall be performed
outside_host: localhost

# This kubernetes namespace will be used for testing
testing_namespace: molecule-verify-from-outside

# The directory in which the example manifests reside
example_manifests_path: ../../../../example
molecule/resources/verify/from_outside/tasks/kubecfg-cleanup.yml (new file)
@@ -0,0 +1,5 @@
---
- name: Clean up kubecfg
  ansible.builtin.file:
    path: "{{ kubecfg.path }}"
    state: absent
molecule/resources/verify/from_outside/tasks/kubecfg-fetch.yml (new file)
@@ -0,0 +1,19 @@
---
- name: Create temporary directory for kubecfg
  ansible.builtin.tempfile:
    state: directory
    suffix: kubecfg
  register: kubecfg
- name: Gathering facts
  delegate_to: "{{ groups['master'][0] }}"
  ansible.builtin.gather_facts:
- name: Download kubecfg
  ansible.builtin.fetch:
    src: "{{ ansible_env.HOME }}/.kube/config"
    dest: "{{ kubecfg.path }}/"
    flat: true
  delegate_to: "{{ groups['master'][0] }}"
  delegate_facts: true
- name: Store path to kubecfg
  ansible.builtin.set_fact:
    kubecfg_path: "{{ kubecfg.path }}/config"
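The fetched file is a normal kubeconfig, so the outside host can use it directly; a sketch (the temp directory name is whatever the `tempfile` task generated, shown value hypothetical):

```bash
kubectl --kubeconfig /tmp/ansible.x1y2z3kubecfg/config get nodes -o wide
```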
14 molecule/resources/verify/from_outside/tasks/main.yml (new file)
@@ -0,0 +1,14 @@
---
- name: Verify
  run_once: true
  delegate_to: "{{ outside_host }}"
  block:
    - name: "TEST CASE: Get kube config"
      ansible.builtin.import_tasks: kubecfg-fetch.yml
    - name: "TEST CASE: Get nodes"
      ansible.builtin.include_tasks: test/get-nodes.yml
    - name: "TEST CASE: Deploy example"
      ansible.builtin.include_tasks: test/deploy-example.yml
  always:
    - name: "TEST CASE: Cleanup"
      ansible.builtin.import_tasks: kubecfg-cleanup.yml
molecule/resources/verify/from_outside/tasks/test/deploy-example.yml (new file)
@@ -0,0 +1,58 @@
---
- name: Deploy example
  block:
    - name: "Create namespace: {{ testing_namespace }}"
      kubernetes.core.k8s:
        api_version: v1
        kind: Namespace
        name: "{{ testing_namespace }}"
        state: present
        wait: true
        kubeconfig: "{{ kubecfg_path }}"

    - name: Apply example manifests
      kubernetes.core.k8s:
        src: "{{ example_manifests_path }}/{{ item }}"
        namespace: "{{ testing_namespace }}"
        state: present
        wait: true
        kubeconfig: "{{ kubecfg_path }}"
      with_items:
        - deployment.yml
        - service.yml

    - name: Get info about nginx service
      kubernetes.core.k8s_info:
        kind: service
        name: nginx
        namespace: "{{ testing_namespace }}"
        kubeconfig: "{{ kubecfg_path }}"
      vars: &load_balancer_metadata
        metallb_ip: status.loadBalancer.ingress[0].ip
        metallb_port: spec.ports[0].port
      register: nginx_services

    - name: Assert that the nginx welcome page is available
      ansible.builtin.uri:
        url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
        return_content: yes
      register: result
      failed_when: "'Welcome to nginx!' not in result.content"
      vars:
        ip: >-
          {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
        port: >-
          {{ nginx_services.resources[0].spec.ports[0].port }}
      # Deactivated linter rules:
      # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
      #   would be undefined. This will not be the case during playbook execution.
      # noqa jinja[invalid]

  always:
    - name: "Remove namespace: {{ testing_namespace }}"
      kubernetes.core.k8s:
        api_version: v1
        kind: Namespace
        name: "{{ testing_namespace }}"
        state: absent
        kubeconfig: "{{ kubecfg_path }}"
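The `ansible.utils.ipwrap` filter is what keeps the templated URL valid for both address families: IPv6 literals must be bracketed in URLs, IPv4 addresses must not be. A standalone check (requires the ansible.utils collection from collections/requirements.yml; address value hypothetical):

```bash
ansible localhost -m ansible.builtin.debug \
  -a "msg={{ 'fdad:bad:ba55::1b:1' | ansible.utils.ipwrap }}"
# -> "msg": "[fdad:bad:ba55::1b:1]"  (an IPv4 input would pass through unchanged)
```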
molecule/resources/verify/from_outside/tasks/test/get-nodes.yml (new file)
@@ -0,0 +1,28 @@
---
- name: Get all nodes in cluster
  kubernetes.core.k8s_info:
    kind: node
    kubeconfig: "{{ kubecfg_path }}"
  register: cluster_nodes

- name: Assert that the cluster contains exactly the expected nodes
  ansible.builtin.assert:
    that: found_nodes == expected_nodes
    success_msg: "Found nodes as expected: {{ found_nodes }}"
    fail_msg: "Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}"
  vars:
    found_nodes: >-
      {{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }}
    expected_nodes: |-
      {{
        (
          ( groups['master'] | default([]) ) +
          ( groups['node'] | default([]) )
        )
        | unique
        | sort
      }}
  # Deactivated linter rules:
  # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
  #   would be undefined. This will not be the case during playbook execution.
  # noqa jinja[invalid]
48 molecule/single_node/molecule.yml (new file)
@@ -0,0 +1,48 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.50
provisioner:
  name: ansible
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - lint
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
15 molecule/single_node/overrides.yml (new file)
@@ -0,0 +1,15 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the default scenario
        apiserver_endpoint: "192.168.30.223"
        metal_lb_ip_range: "192.168.30.91-192.168.30.99"
9 reboot.yml (new file)
@@ -0,0 +1,9 @@
---
- name: Reboot k3s_cluster
  hosts: k3s_cluster
  gather_facts: yes
  tasks:
    - name: Reboot the nodes (and wait up to 5 minutes)
      become: true
      reboot:
        reboot_timeout: 300
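Like the other playbooks, this one picks up its inventory from `ansible.cfg` after the changes above; a sketch of a full reboot cycle:

```bash
ansible-playbook reboot.yml                    # reboots nodes, waiting up to 300 s each
ansible k3s_cluster -m ansible.builtin.ping    # confirm every node answers again
```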
12 requirements.in (new file)
@@ -0,0 +1,12 @@
ansible-core>=2.13.5
ansible-lint>=6.8.6
jmespath>=1.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
molecule-vagrant>=1.0.0
molecule>=4.0.3
netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0
yamllint>=1.28.0
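The pinned `requirements.txt` below is generated from this file, as its own header states; regenerating after bumping a version floor is one command (`pip-compile` comes from the pip-tools package):

```bash
python3 -m pip install pip-tools
pip-compile requirements.in    # rewrites requirements.txt with resolved pins
```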
requirements.txt (new file, 211 lines)
@@ -0,0 +1,211 @@
#
# This file is autogenerated by pip-compile with python 3.8
# To update, run:
#
#    pip-compile requirements.in
#
ansible-compat==3.0.1
    # via molecule
ansible-core==2.14.3
    # via
    #   -r requirements.in
    #   ansible-compat
    #   ansible-lint
ansible-lint==6.14.2
    # via -r requirements.in
arrow==1.2.3
    # via jinja2-time
attrs==22.1.0
    # via jsonschema
binaryornot==0.4.4
    # via cookiecutter
black==22.10.0
    # via ansible-lint
bracex==2.3.post1
    # via wcmatch
cachetools==5.2.0
    # via google-auth
certifi==2022.9.24
    # via
    #   kubernetes
    #   requests
cffi==1.15.1
    # via cryptography
cfgv==3.3.1
    # via pre-commit
chardet==5.0.0
    # via binaryornot
charset-normalizer==2.1.1
    # via requests
click==8.1.3
    # via
    #   black
    #   click-help-colors
    #   cookiecutter
    #   molecule
click-help-colors==0.9.1
    # via molecule
commonmark==0.9.1
    # via rich
cookiecutter==2.1.1
    # via molecule
cryptography==38.0.3
    # via ansible-core
distlib==0.3.6
    # via virtualenv
distro==1.8.0
    # via selinux
enrich==1.2.7
    # via molecule
filelock==3.8.0
    # via
    #   ansible-lint
    #   virtualenv
google-auth==2.14.0
    # via kubernetes
identify==2.5.8
    # via pre-commit
idna==3.4
    # via requests
jinja2==3.1.2
    # via
    #   ansible-core
    #   cookiecutter
    #   jinja2-time
    #   molecule
    #   molecule-vagrant
jinja2-time==0.2.0
    # via cookiecutter
jmespath==1.0.1
    # via -r requirements.in
jsonpatch==1.32
    # via -r requirements.in
jsonpointer==2.3
    # via jsonpatch
jsonschema==4.17.0
    # via
    #   ansible-compat
    #   ansible-lint
    #   molecule
kubernetes==25.3.0
    # via -r requirements.in
markupsafe==2.1.1
    # via jinja2
molecule==4.0.4
    # via
    #   -r requirements.in
    #   molecule-vagrant
molecule-vagrant==1.0.0
    # via -r requirements.in
mypy-extensions==0.4.3
    # via black
netaddr==0.8.0
    # via -r requirements.in
nodeenv==1.7.0
    # via pre-commit
oauthlib==3.2.2
    # via requests-oauthlib
packaging==21.3
    # via
    #   ansible-compat
    #   ansible-core
    #   ansible-lint
    #   molecule
pathspec==0.10.1
    # via
    #   black
    #   yamllint
platformdirs==2.5.2
    # via
    #   black
    #   virtualenv
pluggy==1.0.0
    # via molecule
pre-commit==2.21.0
    # via -r requirements.in
pre-commit-hooks==4.4.0
    # via -r requirements.in
pyasn1==0.4.8
    # via
    #   pyasn1-modules
    #   rsa
pyasn1-modules==0.2.8
    # via google-auth
pycparser==2.21
    # via cffi
pygments==2.13.0
    # via rich
pyparsing==3.0.9
    # via packaging
pyrsistent==0.19.2
    # via jsonschema
python-dateutil==2.8.2
    # via
    #   arrow
    #   kubernetes
python-slugify==6.1.2
    # via cookiecutter
python-vagrant==1.0.0
    # via molecule-vagrant
pyyaml==6.0
    # via
    #   -r requirements.in
    #   ansible-compat
    #   ansible-core
    #   ansible-lint
    #   cookiecutter
    #   kubernetes
    #   molecule
    #   molecule-vagrant
    #   pre-commit
    #   yamllint
requests==2.28.1
    # via
    #   cookiecutter
    #   kubernetes
    #   requests-oauthlib
requests-oauthlib==1.3.1
    # via kubernetes
resolvelib==0.8.1
    # via ansible-core
rich==12.6.0
    # via
    #   ansible-lint
    #   enrich
    #   molecule
rsa==4.9
    # via google-auth
ruamel-yaml==0.17.21
    # via
    #   ansible-lint
    #   pre-commit-hooks
selinux==0.2.1
    # via molecule-vagrant
six==1.16.0
    # via
    #   google-auth
    #   kubernetes
    #   python-dateutil
subprocess-tee==0.4.1
    # via
    #   ansible-compat
    #   ansible-lint
text-unidecode==1.3
    # via python-slugify
urllib3==1.26.12
    # via
    #   kubernetes
    #   requests
virtualenv==20.16.6
    # via pre-commit
wcmatch==8.4.1
    # via ansible-lint
websocket-client==1.4.2
    # via kubernetes
yamllint==1.29.0
    # via
    #   -r requirements.in
    #   ansible-lint

# The following packages are considered to be unsafe in a requirements file:
# setuptools
reset.sh (2 lines changed)
@@ -1,3 +1,3 @@
 #!/bin/bash

-ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook reset.yml
reset.yml (18 lines changed)
@@ -2,6 +2,22 @@
 - hosts: k3s_cluster
   gather_facts: yes
-  become: yes
   roles:
     - role: reset
+      become: true
+    - role: raspberrypi
+      become: true
+      vars: {state: absent}
+  post_tasks:
+    - name: Reboot and wait for node to come back up
+      become: true
+      reboot:
+        reboot_timeout: 3600
+
+- hosts: proxmox
+  gather_facts: true
+  become: yes
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: reset_proxmox_lxc
+      when: proxmox_lxc_configure
roles/k3s/master/defaults/main.yml
@@ -1,11 +1,15 @@
 ---
-ansible_user: root
+# If you want to explicitly define an interface that ALL control nodes
+# should use to propagate the VIP, define it here. Otherwise, kube-vip
+# will determine the right interface automatically at runtime.
+kube_vip_iface: null
+
 server_init_args: >-
   {% if groups['master'] | length > 1 %}
-    {% if ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) %}
+    {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
       --cluster-init
     {% else %}
-      --server https://{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}:6443
+      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
     {% endif %}
     --token {{ k3s_token }}
   {% endif %}
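As a worked example of the new template (hypothetical values: three masters, the first master's k3s_node_ip set to "2001:db8::10,192.168.30.38", token "mytoken"), the second and third masters would render server_init_args roughly as:

    --server https://[2001:db8::10]:6443 --token mytoken

The split/first pair picks the first address of a comma-separated dual-stack node IP list, and ansible.utils.ipwrap adds brackets only around IPv6 addresses; an IPv4-first master would render as https://192.168.30.38:6443.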
roles/k3s/master/tasks/fetch_k3s_init_logs.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
---
# Download logs of k3s-init.service from the nodes to localhost.
# Note that log_destination must be set.

- name: Fetch k3s-init.service logs
  ansible.builtin.command:
    cmd: journalctl --all --unit=k3s-init.service
  changed_when: false
  register: k3s_init_log

- name: Create {{ log_destination }}
  delegate_to: localhost
  run_once: true
  become: false
  ansible.builtin.file:
    path: "{{ log_destination }}"
    state: directory
    mode: "0755"

- name: Store logs to {{ log_destination }}
  delegate_to: localhost
  become: false
  ansible.builtin.template:
    src: content.j2
    dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
    mode: 0644
  vars:
    content: "{{ k3s_init_log.stdout }}"
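These tasks are wired into the always: block of the verification step below, gated on the ANSIBLE_K3S_LOG_DIR environment variable. A plausible way to trigger them, assuming the sample inventory path:

    ANSIBLE_K3S_LOG_DIR=./k3s-init-logs ansible-playbook site.yml -i inventory/my-cluster/hosts.ini

after which ./k3s-init-logs/k3s-init@<hostname>.log would hold each node's journalctl output.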
roles/k3s/master/tasks/main.yml
@@ -13,59 +13,11 @@
   args:
     warn: false  # The ansible systemd module does not support reset-failed

-- name: Create manifests directory on first master
-  file:
-    path: /var/lib/rancher/k3s/server/manifests
-    state: directory
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
-
-- name: Copy vip rbac manifest to first master
-  template:
-    src: "vip.rbac.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip.rbac.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
-
-- name: Copy vip manifest to first master
-  template:
-    src: "vip.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
-
-- name: Copy metallb namespace manifest to first master
-  template:
-    src: "metallb.namespace.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb.namespace.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
-
-- name: Copy metallb ConfigMap manifest to first master
-  template:
-    src: "metallb.configmap.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb.configmap.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
-
-- name: Copy metallb main manifest to first master
-  template:
-    src: "metallb.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
+- name: Deploy vip manifest
+  include_tasks: vip.yml
+
+- name: Deploy metallb manifest
+  include_tasks: metallb.yml

 - name: Init cluster inside the transient k3s-init service
   command:
@@ -74,8 +26,6 @@
       --unit=k3s-init \
       k3s server {{ server_init_args }}"
     creates: "{{ systemd_dir }}/k3s.service"
-  args:
-    warn: false  # The ansible systemd module does not support transient units

 - name: Verification
   block:
@@ -88,11 +38,18 @@
       delay: 10
       changed_when: false
   always:
+    - name: Save logs of k3s-init.service
+      include_tasks: fetch_k3s_init_logs.yml
+      when: log_destination
+      vars:
+        log_destination: >-
+          {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
     - name: Kill the temporary service used for initialization
       systemd:
         name: k3s-init
        state: stopped
       failed_when: false
+      when: not ansible_check_mode

 - name: Copy K3s service file
   register: k3s_service
@@ -140,25 +97,32 @@

 - name: Create directory .kube
   file:
-    path: ~{{ ansible_user }}/.kube
+    path: "{{ ansible_user_dir }}/.kube"
     state: directory
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: "u=rwx,g=rx,o="

 - name: Copy config file to user home directory
   copy:
     src: /etc/rancher/k3s/k3s.yaml
-    dest: ~{{ ansible_user }}/.kube/config
+    dest: "{{ ansible_user_dir }}/.kube/config"
     remote_src: yes
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: "u=rw,g=,o="

-- name: Configure kubectl cluster to https://{{ apiserver_endpoint }}:6443
+- name: Configure kubectl cluster to {{ endpoint_url }}
   command: >-
     k3s kubectl config set-cluster default
-      --server=https://{{ apiserver_endpoint }}:6443
-      --kubeconfig ~{{ ansible_user }}/.kube/config
+      --server={{ endpoint_url }}
+      --kubeconfig {{ ansible_user_dir }}/.kube/config
   changed_when: true
+  vars:
+    endpoint_url: >-
+      https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
+  # Deactivated linter rules:
+  #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
+  #     would be undefined. This will not be the case during playbook execution.
+  # noqa jinja[invalid]

 - name: Create kubectl symlink
   file:
@@ -171,3 +135,25 @@
     src: /usr/local/bin/k3s
     dest: /usr/local/bin/crictl
     state: link
+
+- name: Get contents of manifests folder
+  find:
+    paths: /var/lib/rancher/k3s/server/manifests
+    file_type: file
+  register: k3s_server_manifests
+
+- name: Get sub dirs of manifests folder
+  find:
+    paths: /var/lib/rancher/k3s/server/manifests
+    file_type: directory
+  register: k3s_server_manifests_directories
+
+- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
+  file:
+    path: "{{ item.path }}"
+    state: absent
+  with_items:
+    - "{{ k3s_server_manifests.files }}"
+    - "{{ k3s_server_manifests_directories.files }}"
+  loop_control:
+    label: "{{ item.path }}"
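The endpoint_url indirection above exists so that IPv6 control-plane addresses are bracketed before the port is appended. A sketch of what ansible.utils.ipwrap produces (hypothetical endpoints):

    apiserver_endpoint: 192.168.30.222  ->  https://192.168.30.222:6443
    apiserver_endpoint: 2001:db8::222   ->  https://[2001:db8::222]:6443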
roles/k3s/master/tasks/metallb.yml (new file, 30 lines)
@@ -0,0 +1,30 @@
---
- name: Create manifests directory on first master
  file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml"  # noqa yaml[line-length]
    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
  ansible.builtin.replace:
    path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
    replace: "{{ item.to }}"
  with_items:
    - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
      to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
  loop_control:
    label: "{{ item.change }} => {{ item.to }}"
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
roles/k3s/master/tasks/vip.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
---
- name: Create manifests directory on first master
  file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Download vip rbac manifest to first master
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Copy vip manifest to first master
  template:
    src: "vip.yaml.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
roles/k3s/master/templates/content.j2 (new file, 5 lines)
@@ -0,0 +1,5 @@
{#
This is a really simple template that just outputs the
value of the "content" variable.
#}
{{ content }}
roles/k3s/master/templates/metallb.configmap.j2 (deleted, 13 lines)
@@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - {{ metal_lb_ip_range }}
roles/k3s/master/templates/metallb.namespace.j2 (deleted, 7 lines)
@@ -1,7 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: metallb-system
  labels:
    app: metallb
roles/k3s/master/templates/metallb.yaml.j2 (deleted, 481 lines)
@@ -1,481 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  labels:
    app: metallb
  name: controller
spec:
  allowPrivilegeEscalation: false
  allowedCapabilities: []
  allowedHostPaths: []
  defaultAddCapabilities: []
  defaultAllowPrivilegeEscalation: false
  fsGroup:
    ranges:
    - max: 65535
      min: 1
    rule: MustRunAs
  hostIPC: false
  hostNetwork: false
  hostPID: false
  privileged: false
  readOnlyRootFilesystem: true
  requiredDropCapabilities:
  - ALL
  runAsUser:
    ranges:
    - max: 65535
      min: 1
    rule: MustRunAs
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    ranges:
    - max: 65535
      min: 1
    rule: MustRunAs
  volumes:
  - configMap
  - secret
  - emptyDir
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  labels:
    app: metallb
  name: speaker
spec:
  allowPrivilegeEscalation: false
  allowedCapabilities:
  - NET_RAW
  allowedHostPaths: []
  defaultAddCapabilities: []
  defaultAllowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  hostIPC: false
  hostNetwork: true
  hostPID: false
  hostPorts:
  - max: 7472
    min: 7472
  - max: 7946
    min: 7946
  privileged: true
  readOnlyRootFilesystem: true
  requiredDropCapabilities:
  - ALL
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - secret
  - emptyDir
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: metallb
  name: controller
  namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: metallb
  name: speaker
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: metallb
  name: metallb-system:controller
rules:
- apiGroups:
  - ''
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ''
  resources:
  - services/status
  verbs:
  - update
- apiGroups:
  - ''
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - policy
  resourceNames:
  - controller
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: metallb
  name: metallb-system:speaker
rules:
- apiGroups:
  - ''
  resources:
  - services
  - endpoints
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups: ["discovery.k8s.io"]
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ''
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - policy
  resourceNames:
  - speaker
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app: metallb
  name: config-watcher
  namespace: metallb-system
rules:
- apiGroups:
  - ''
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app: metallb
  name: pod-lister
  namespace: metallb-system
rules:
- apiGroups:
  - ''
  resources:
  - pods
  verbs:
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app: metallb
  name: controller
  namespace: metallb-system
rules:
- apiGroups:
  - ''
  resources:
  - secrets
  verbs:
  - create
- apiGroups:
  - ''
  resources:
  - secrets
  resourceNames:
  - memberlist
  verbs:
  - list
- apiGroups:
  - apps
  resources:
  - deployments
  resourceNames:
  - controller
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: metallb
  name: metallb-system:controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:controller
subjects:
- kind: ServiceAccount
  name: controller
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: metallb
  name: metallb-system:speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:speaker
subjects:
- kind: ServiceAccount
  name: speaker
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app: metallb
  name: config-watcher
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: config-watcher
subjects:
- kind: ServiceAccount
  name: controller
- kind: ServiceAccount
  name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app: metallb
  name: pod-lister
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: pod-lister
subjects:
- kind: ServiceAccount
  name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app: metallb
  name: controller
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: controller
subjects:
- kind: ServiceAccount
  name: controller
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: metallb
    component: speaker
  name: speaker
  namespace: metallb-system
spec:
  selector:
    matchLabels:
      app: metallb
      component: speaker
  template:
    metadata:
      annotations:
        prometheus.io/port: '7472'
        prometheus.io/scrape: 'true'
      labels:
        app: metallb
        component: speaker
    spec:
      containers:
      - args:
        - --port=7472
        - --config=config
        - --log-level=info
        env:
        - name: METALLB_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: METALLB_HOST
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: METALLB_ML_BIND_ADDR
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        # needed when another software is also using memberlist / port 7946
        # when changing this default you also need to update the container ports definition
        # and the PodSecurityPolicy hostPorts definition
        #- name: METALLB_ML_BIND_PORT
        #  value: "7946"
        - name: METALLB_ML_LABELS
          value: "app=metallb,component=speaker"
        - name: METALLB_ML_SECRET_KEY
          valueFrom:
            secretKeyRef:
              name: memberlist
              key: secretkey
        image: quay.io/metallb/speaker:{{ metal_lb_speaker_tag_version }}
        name: speaker
        ports:
        - containerPort: 7472
          name: monitoring
        - containerPort: 7946
          name: memberlist-tcp
        - containerPort: 7946
          name: memberlist-udp
          protocol: UDP
        livenessProbe:
          httpGet:
            path: /metrics
            port: monitoring
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 1
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /metrics
            port: monitoring
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 1
          successThreshold: 1
          failureThreshold: 3
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_RAW
            drop:
            - ALL
          readOnlyRootFilesystem: true
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: speaker
      terminationGracePeriodSeconds: 2
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: metallb
    component: controller
  name: controller
  namespace: metallb-system
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: metallb
      component: controller
  template:
    metadata:
      annotations:
        prometheus.io/port: '7472'
        prometheus.io/scrape: 'true'
      labels:
        app: metallb
        component: controller
    spec:
      containers:
      - args:
        - --port=7472
        - --config=config
        - --log-level=info
        env:
        - name: METALLB_ML_SECRET_NAME
          value: memberlist
        - name: METALLB_DEPLOYMENT
          value: controller
        image: quay.io/metallb/controller:{{ metal_lb_controller_tag_version }}
        name: controller
        ports:
        - containerPort: 7472
          name: monitoring
        livenessProbe:
          httpGet:
            path: /metrics
            port: monitoring
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 1
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /metrics
            port: monitoring
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 1
          successThreshold: 1
          failureThreshold: 3
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - all
          readOnlyRootFilesystem: true
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
        fsGroup: 65534
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0
roles/k3s/master/templates/vip.rbac.yaml.j2 (deleted, 33 lines)
@@ -1,33 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-vip
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  name: system:kube-vip-role
rules:
  - apiGroups: [""]
    resources: ["services", "services/status", "nodes"]
    verbs: ["list","get","watch", "update"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:kube-vip-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-vip-role
subjects:
- kind: ServiceAccount
  name: kube-vip
  namespace: kube-system
roles/k3s/master/templates/vip.yaml.j2
@@ -1,7 +1,6 @@
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  creationTimestamp: null
   name: kube-vip-ds
   namespace: kube-system
 spec:
@@ -10,7 +9,6 @@ spec:
       name: kube-vip-ds
   template:
     metadata:
-      creationTimestamp: null
       labels:
         name: kube-vip-ds
     spec:
@@ -32,10 +30,12 @@ spec:
           value: "true"
         - name: port
           value: "6443"
+{% if kube_vip_iface %}
         - name: vip_interface
-          value: {{ flannel_iface }}
+          value: {{ kube_vip_iface }}
+{% endif %}
         - name: vip_cidr
-          value: "32"
+          value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
         - name: cp_enable
           value: "true"
         - name: cp_namespace
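The new vip_cidr expression derives the prefix length from apiserver_endpoint instead of hardcoding 32. Assuming the ansible.utils filters behave as documented, a bare address is first normalized to a one-address subnet and then reduced to its prefix:

    192.168.30.222  ->  192.168.30.222/32   ->  "32"
    2001:db8::222   ->  2001:db8::222/128   ->  "128"

so an IPv6 VIP gets /128 rather than an invalid /32.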
roles/k3s/node/templates/k3s.service.j2
@@ -7,7 +7,7 @@ After=network-online.target
 Type=notify
 ExecStartPre=-/sbin/modprobe br_netfilter
 ExecStartPre=-/sbin/modprobe overlay
-ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
+ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
 KillMode=process
 Delegate=yes
 # Having non-zero Limit*s causes performance problems due to accounting overhead
roles/k3s/post/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s
roles/k3s/post/tasks/main.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
- name: Deploy metallb pool
  include_tasks: metallb.yml

- name: Remove tmp directory used for manifests
  file:
    path: /tmp/k3s
    state: absent
roles/k3s/post/tasks/metallb.yml (new file, 101 lines)
@@ -0,0 +1,101 @@
---
- name: Create manifests directory for temp configuration
  file:
    path: /tmp/k3s
    state: directory
    owner: "{{ ansible_user_id }}"
    mode: 0755
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Copy metallb CRs manifest to first master
  template:
    src: "metallb.crs.j2"
    dest: "/tmp/k3s/metallb-crs.yaml"
    owner: "{{ ansible_user_id }}"
    mode: 0755
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Test metallb-system namespace
  command: >-
    k3s kubectl -n metallb-system
  changed_when: false
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Wait for MetalLB resources
  command: >-
    k3s kubectl wait {{ item.resource }}
    --namespace='metallb-system'
    {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
    {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
    {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
    --timeout='{{ metal_lb_available_timeout }}'
  changed_when: false
  run_once: true
  with_items:
    - description: controller
      resource: deployment
      name: controller
      condition: --for condition=Available=True
    - description: webhook service
      resource: pod
      selector: component=controller
      condition: --for=jsonpath='{.status.phase}'=Running
    - description: pods in replica sets
      resource: pod
      selector: component=controller,app=metallb
      condition: --for condition=Ready
    - description: ready replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.readyReplicas}'=1
    - description: fully labeled replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
    - description: available replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.availableReplicas}'=1
  loop_control:
    label: "{{ item.description }}"

- name: Test metallb-system webhook-service endpoint
  command: >-
    k3s kubectl -n metallb-system get endpoints webhook-service
  changed_when: false
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Apply metallb CRs
  command: >-
    k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
    --timeout='{{ metal_lb_available_timeout }}'
  register: this
  changed_when: false
  run_once: true
  until: this.rc == 0
  retries: 5

- name: Test metallb-system resources for Layer 2 configuration
  command: >-
    k3s kubectl -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "layer2"
  with_items:
    - IPAddressPool
    - L2Advertisement

- name: Test metallb-system resources for BGP configuration
  command: >-
    k3s kubectl -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "bgp"
  with_items:
    - IPAddressPool
    - BGPPeer
    - BGPAdvertisement
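For one loop item above (the controller deployment), the generated wait command comes out roughly as follows, assuming the default metal_lb_available_timeout of 120s:

    k3s kubectl wait deployment --namespace='metallb-system' controller \
      --for condition=Available=True --timeout='120s'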
roles/k3s/post/templates/metallb.crs.j2 (new file, 43 lines)
@@ -0,0 +1,43 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
{% if metal_lb_ip_range is string %}
{# metal_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set metal_lb_ip_range = [metal_lb_ip_range] %}
{% endif %}
{% for range in metal_lb_ip_range %}
    - {{ range }}
{% endfor %}

{% if metal_lb_mode == "layer2" %}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
{% endif %}
{% if metal_lb_mode == "bgp" %}
---
apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
  name: default
  namespace: metallb-system
spec:
  myASN: {{ metal_lb_bgp_my_asn }}
  peerASN: {{ metal_lb_bgp_peer_asn }}
  peerAddress: {{ metal_lb_bgp_peer_address }}

---
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
  name: default
  namespace: metallb-system
{% endif %}
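Rendered for a layer2 setup with the single-node scenario's range above (hypothetical values), the template would emit approximately:

    apiVersion: metallb.io/v1beta1
    kind: IPAddressPool
    metadata:
      name: first-pool
      namespace: metallb-system
    spec:
      addresses:
        - 192.168.30.91-192.168.30.99
    ---
    apiVersion: metallb.io/v1beta1
    kind: L2Advertisement
    metadata:
      name: default
      namespace: metallb-system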
roles/lxc/handlers/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
- name: reboot server
  become: true
  reboot:
roles/lxc/tasks/main.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
---
- name: Check for rc.local file
  stat:
    path: /etc/rc.local
  register: rcfile

- name: Create rc.local if needed
  lineinfile:
    path: /etc/rc.local
    line: "#!/bin/sh -e"
    create: true
    insertbefore: BOF
    mode: "u=rwx,g=rx,o=rx"
  when: not rcfile.stat.exists

- name: Write rc.local file
  blockinfile:
    path: /etc/rc.local
    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
    state: present
  notify: reboot server
roles/prereq/tasks/main.yml
@@ -23,6 +23,13 @@
     state: present
     reload: yes

+- name: Enable IPv6 router advertisements
+  sysctl:
+    name: net.ipv6.conf.all.accept_ra
+    value: "2"
+    state: present
+    reload: yes
+
 - name: Add br_netfilter to /etc/modules-load.d/
   copy:
     content: "br_netfilter"
roles/proxmox_lxc/handlers/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
- name: reboot containers
  command:
    "pct reboot {{ item }}"
  loop: "{{ proxmox_lxc_filtered_ids }}"
roles/proxmox_lxc/tasks/main.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
---
- name: check for container files that exist on this host
  stat:
    path: "/etc/pve/lxc/{{ item }}.conf"
  loop: "{{ proxmox_lxc_ct_ids }}"
  register: stat_results

- name: filter out files that do not exist
  set_fact:
    proxmox_lxc_filtered_files:
      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'

# used for the reboot handler
- name: get container ids from filtered files
  set_fact:
    proxmox_lxc_filtered_ids:
      '{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'

# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
  lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.apparmor.profile"
    line: "lxc.apparmor.profile: unconfined"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right cgroup
  lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.cgroup.devices.allow"
    line: "lxc.cgroup.devices.allow: a"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right cap drop
  lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.cap.drop"
    line: "lxc.cap.drop: "
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right mounts
  lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.mount.auto"
    line: 'lxc.mount.auto: "proc:rw sys:rw"'
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers
roles/raspberrypi/defaults/main.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
# Indicates whether the k3s prerequisites for Raspberry Pi should be set up
# Possible values:
#   - present
#   - absent
state: present
roles/raspberrypi/tasks/main.yml
@@ -47,13 +47,20 @@
     - raspberry_pi|default(false)
     - ansible_facts.lsb.description|default("") is match("Debian.*bullseye")

-- name: execute OS related tasks on the Raspberry Pi
+- name: execute OS related tasks on the Raspberry Pi - {{ action }}
   include_tasks: "{{ item }}"
   with_first_found:
-    - "prereq/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
-    - "prereq/{{ detected_distribution }}.yml"
-    - "prereq/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
-    - "prereq/{{ ansible_distribution }}.yml"
-    - "prereq/default.yml"
+    - "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
+    - "{{ action }}/{{ detected_distribution }}.yml"
+    - "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
+    - "{{ action }}/{{ ansible_distribution }}.yml"
+    - "{{ action }}/default.yml"
+  vars:
+    action: >-
+      {% if state == "present" -%}
+      setup
+      {%- else -%}
+      teardown
+      {%- endif %}
   when:
     - raspberry_pi|default(false)
roles/raspberrypi/tasks/setup/Raspbian.yml
@@ -13,7 +13,6 @@
 - name: Flush iptables before changing to iptables-legacy
   iptables:
     flush: true
-  changed_when: false  # iptables flush always returns changed

 - name: Changing to iptables-legacy
   alternatives:
roles/raspberrypi/tasks/setup/Rocky.yml
@@ -1,8 +1,9 @@
 ---
-- name: Enable cgroup via boot commandline if not already enabled for Centos
+- name: Enable cgroup via boot commandline if not already enabled for Rocky
   lineinfile:
     path: /boot/cmdline.txt
     backrefs: yes
     regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
     line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
   notify: reboot
+  when: not ansible_check_mode
roles/raspberrypi/tasks/setup/Ubuntu.yml
@@ -6,3 +6,8 @@
     regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
     line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
   notify: reboot
+
+- name: Install linux-modules-extra-raspi
+  apt:
+    name: linux-modules-extra-raspi
+    state: present
roles/raspberrypi/tasks/teardown/Raspbian.yml (new file, 1 line)
@@ -0,0 +1 @@
---
roles/raspberrypi/tasks/teardown/Rocky.yml (new file, 1 line)
@@ -0,0 +1 @@
---
roles/raspberrypi/tasks/teardown/Ubuntu.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
- name: Remove linux-modules-extra-raspi
  apt:
    name: linux-modules-extra-raspi
    state: absent
roles/raspberrypi/tasks/teardown/default.yml (new file, 1 line)
@@ -0,0 +1 @@
---
roles/reset/tasks/main.yml
@@ -10,7 +10,7 @@
       - k3s-node
       - k3s-init

-- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
+- name: RUN pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
   register: pkill_containerd_shim_runc
   command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
   changed_when: "pkill_containerd_shim_runc.rc == 0"
@@ -44,13 +44,41 @@
     - /var/lib/kubelet
     - /var/lib/rancher/k3s
     - /var/lib/rancher/
-    - /usr/local/bin/k3s
     - /var/lib/cni/

-- name: daemon_reload
+- name: Reload daemon_reload
   systemd:
     daemon_reload: yes

-- name: Reboot and wait for node to come back up
-  reboot:
-    reboot_timeout: 3600
+- name: Remove tmp directory used for manifests
+  file:
+    path: /tmp/k3s
+    state: absent
+
+- name: Check if rc.local exists
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Remove rc.local modifications for proxmox lxc containers
+  become: true
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    create: false
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Check rc.local for cleanup
+  become: true
+  slurp:
+    src: /etc/rc.local
+  register: rcslurp
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Cleanup rc.local if we only have a Shebang line
+  become: true
+  file:
+    path: /etc/rc.local
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1
roles/reset_proxmox_lxc/handlers/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
- name: reboot containers
  command:
    "pct reboot {{ item }}"
  loop: "{{ proxmox_lxc_filtered_ids }}"
roles/reset_proxmox_lxc/tasks/main.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
---
- name: check for container files that exist on this host
  stat:
    path: "/etc/pve/lxc/{{ item }}.conf"
  loop: "{{ proxmox_lxc_ct_ids }}"
  register: stat_results

- name: filter out files that do not exist
  set_fact:
    proxmox_lxc_filtered_files:
      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'

# used for the reboot handler
- name: get container ids from filtered files
  set_fact:
    proxmox_lxc_filtered_ids:
      '{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'

- name: Remove LXC apparmor profile
  lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.apparmor.profile"
    line: "lxc.apparmor.profile: unconfined"
    state: absent
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Remove lxc cgroups
  lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.cgroup.devices.allow"
    line: "lxc.cgroup.devices.allow: a"
    state: absent
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Remove lxc cap drop
  lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.cap.drop"
    line: "lxc.cap.drop: "
    state: absent
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Remove lxc mounts
  lineinfile:
    dest: "{{ item }}"
    regexp: "^lxc.mount.auto"
    line: 'lxc.mount.auto: "proc:rw sys:rw"'
    state: absent
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers
site.yml (24 lines changed)
@@ -1,19 +1,37 @@
 ---
+
+- hosts: proxmox
+  gather_facts: true
+  become: yes
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: proxmox_lxc
+      when: proxmox_lxc_configure
+
 - hosts: k3s_cluster
   gather_facts: yes
-  become: yes
   roles:
+    - role: lxc
+      become: true
+      when: proxmox_lxc_configure
     - role: prereq
+      become: true
     - role: download
+      become: true
     - role: raspberrypi
+      become: true

 - hosts: master
-  become: yes
   roles:
     - role: k3s/master
+      become: true

 - hosts: node
-  become: yes
   roles:
     - role: k3s/node
+      become: true
+
+- hosts: master
+  roles:
+    - role: k3s/post
+      become: true
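With this layout, privilege escalation is requested per role rather than per play, and the usual entrypoint for the repository is unchanged; a typical run, assuming the sample inventory layout:

    ansible-playbook site.yml -i inventory/my-cluster/hosts.ini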
templates/rc.local.j2 (new file, 8 lines)
@@ -0,0 +1,8 @@
# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
# see: https://github.com/kubernetes-sigs/kind/issues/662
if [ ! -e /dev/kmsg ]; then
    ln -s /dev/console /dev/kmsg
fi

# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c
mount --make-rshared /
vagrant/Vagrantfile (vendored; deleted, 79 lines)
@@ -1,79 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  # General configuration
  config.vm.box = "generic/ubuntu2110"
  config.vm.synced_folder ".", "/vagrant", disabled: true
  config.ssh.insert_key = false

  config.vm.provider :virtualbox do |v|
    v.memory = 4096
    v.cpus = 2
    v.linked_clone = true
  end

  # Control Node 1
  config.vm.define "control1" do |control1|
    control1.vm.hostname = "control1"
    control1.vm.network "private_network", ip: "192.168.30.38"
  end

  # Control Node 2
  config.vm.define "control2" do |control2|
    control2.vm.hostname = "control2"
    control2.vm.network "private_network", ip: "192.168.30.39"
  end

  # Control Node 3
  config.vm.define "control3" do |control3|
    control3.vm.hostname = "control3"
    control3.vm.network "private_network", ip: "192.168.30.40"
  end

  # Worker Node 1
  config.vm.define "node1" do |node1|
    node1.vm.hostname = "node1"
    node1.vm.network "private_network", ip: "192.168.30.41"
  end

  # Worker Node 2
  config.vm.define "node2" do |node2|
    node2.vm.hostname = "node2"
    node2.vm.network "private_network", ip: "192.168.30.42"
  end

  config.vm.provision "ansible", type: "ansible", run: "never" do |ansible|
    ansible.playbook = "../site.yml"
    ansible.limit = "all"
    ansible.groups = {
      "master" => ["control1", "control2", "control3"],
      "node" => ["node1", "node2"],
      "k3s_cluster:children" => ["master", "node"],
      "k3s_cluster:vars" => {"k3s_version" => "v1.23.4+k3s1",
                             "ansible_user" => "vagrant",
                             "systemd_dir" => "/etc/systemd/system",
                             "flannel_iface" => "eth1",
                             "apiserver_endpoint" => "192.168.30.222",
                             "k3s_token" => "supersecret",
                             "extra_server_args" => "--node-ip={{ ansible_eth1.ipv4.address }} --flannel-iface={{ flannel_iface }} --no-deploy servicelb --no-deploy traefik",
                             "extra_agent_args" => "--flannel-iface={{ flannel_iface }}",
                             "kube_vip_tag_version" => "v0.4.2",
                             "metal_lb_speaker_tag_version" => "v0.12.1",
                             "metal_lb_controller_tag_version" => "v0.12.1",
                             "metal_lb_ip_range" => "192.168.30.80-192.168.30.90",
                             "retry_count" => "30"}
    }
    ansible.host_vars = {
      "control1" => {
        "server_init_args" => "--cluster-init --token {{ k3s_token }} {{ extra_server_args | default('') }}"
      },
      "control2" => {
        "server_init_args" => "--server https://192.168.30.38:6443 --token {{ k3s_token }} {{ extra_server_args | default('') }}"
      },
      "control3" => {
        "server_init_args" => "--server https://192.168.30.38:6443 --token {{ k3s_token }} {{ extra_server_args | default('') }}"
      }
    }
  end
end