Mirror of https://github.com/techno-tim/k3s-ansible.git
Synced 2025-12-25 18:23:05 +01:00

Compare commits: v1.24.9+k3s1...dependabot (179 commits)
Commit SHA1s:

3ad9d50f79 5cbbf7371b 422621c69c 39988a9bee 133a84b564 6b79057f6c
4c0b1ee8f3 11f9505460 850301fbc4 983e11322e a4df16cf87 f8ababb7bf
90eb5e4b41 97ed29b4a2 fc2225ab8d d99f6a96f2 fab302fd91 eddbcbfb76
03ae8de0d5 d136fa4486 b906cfbf72 2c04f38e2c 3435f43748 924a2f528c
2892ac3858 df8e8dd591 3a0303d130 b077a49e1f 635f0b21b3 4a64ad42df
d0537736de 2149827800 2d0596209e 3a20500f9c 9ce9fecc5b 668d7fb896
6cee0e9051 6823ad51d5 1a521ea0d9 e48bb6df26 36893c27fb e8cd10d49b
b86156b995 072f1a321d 2f46a54240 bf0418d77f d88eb80df0 f50d335451
d6597150c7 353f7ab641 c7c727c3dc 0422bfa2ac 0333406725 f4a19d368b
02d212c007 80095250e9 4fe2c92795 b3f2a4addc cb03ee829e 9e2e82faeb
7c1f6cbe42 604eb7a6e6 a204ed5169 b6608ca3e4 8252a45dfd c99f098c2e
7867b87d85 dfe19f3731 a46d97a28d dc9d571f17 6742551e5c fb3478a086
518c5bb62a 3f5d8dfe9f efbfadcb93 f81ec04ba2 8432d3bc66 14ae9df1bc
f175716339 955c6f6b4a 3b74985767 9ace193ade 83a0be3afd 029eba6102
0c8253b3a5 326b71dfa2 b95d6dd2cc e4146b4ca9 1fb10faf7f ea3b3c776a
5beca87783 6ffc25dfe5 bcd37a6904 8dd3ffc825 f6ba208b5c a22d8f7aaf
05fb6b566d 3aeb7d69ea 61bf3971ef 3f06a11c8d 3888a29bb1 98ef696f31
de26a79a4c ab7ca9b551 c5f71c9e2e 0f23e7e258 121061d875 db53f595fd
7b6b24ce4d a5728da35e cda7c92203 d910b83bf3 101313f880 12be355867
aa09e3e9df 511c410451 df9c6f3014 5ae8fd1223 e2e9881f0f edf0c9eebd
7669fd4721 cddbfc8e40 70e658cf98 7badfbd7bd e880f08d26 95b2836dfc
505c2eeff2 9b6d551dd6 a64e882fb7 38e773315b 70ddf7b63c fb3128a783
2e318e0862 0607eb8aa4 a9904d1562 9707bc8a58 e635bd2626 1aabb5a927
215690b55b bd44a9b126 8d61fe81e5 c0ff304f22 83077ecdd1 33ae0d4970
edd4838407 5c79ea9b71 3d204ad851 13bd868faa c564a8562a 0d6d43e7ca
c0952288c2 1c9796e98b 288c4089e0 49f0a2ce6b 6c4621bd56 3e16ab6809
83fe50797c 2db0b3024c 6b2af77e74 d1d1bc3d91 3a1a7a19aa 030eeb4b75
4aeeb124ef 511c020bec c47da38b53 6448948e9f 7bc198ab26 65bbc8e2ac
dc2976e7f6 5a7ba98968 10c6ef1d57 ed4d888e3d 49d6d484ae 96c49c864e
60adb1de42 e023808f2f 511ec493d6 be3e72e173 e33cbe52c1
.ansible-lint
@@ -1,17 +1,21 @@
 ---
+profile: production
 exclude_paths:
   # default paths
-  - '.cache/'
-  - '.github/'
-  - 'test/fixtures/formatting-before/'
-  - 'test/fixtures/formatting-prettier/'
+  - .cache/
+  - .github/
+  - test/fixtures/formatting-before/
+  - test/fixtures/formatting-prettier/
 
   # The "converge" and "reset" playbooks use import_playbook in
   # conjunction with the "env" lookup plugin, which lets the
   # syntax check of ansible-lint fail.
-  - 'molecule/**/converge.yml'
-  - 'molecule/**/prepare.yml'
-  - 'molecule/**/reset.yml'
+  - molecule/**/converge.yml
+  - molecule/**/prepare.yml
+  - molecule/**/reset.yml
+
+  # The file was generated by galaxy ansible - don't mess with it.
+  - galaxy.yml
 
 skip_list:
-  - 'fqcn-builtins'
+  - var-naming[no-role-prefix]
12 .github/ISSUE_TEMPLATE.md vendored
@@ -1,5 +1,5 @@
 
-<!-- It's a good idea to check this post first for general troubleshooting https://github.com/techno-tim/k3s-ansible/discussions/19 -->
+<!-- It's a good idea to check this post first for general troubleshooting https://github.com/timothystewart6/k3s-ansible/discussions/19 -->
 
 <!--- Provide a general summary of the issue in the Title above -->
 
@@ -37,6 +37,11 @@ systemd_dir: ""
 
 flannel_iface: ""
 
+#calico_iface: ""
+calico_ebpf: ""
+calico_cidr: ""
+calico_tag: ""
+
 apiserver_endpoint: ""
 
 k3s_token: "NA"
@@ -46,6 +51,9 @@ extra_agent_args: ""
 
 kube_vip_tag_version: ""
 
+kube_vip_cloud_provider_tag_version: ""
+kube_vip_lb_ip_range: ""
+
 metal_lb_speaker_tag_version: ""
 metal_lb_controller_tag_version: ""
 
@@ -74,4 +82,4 @@ node
 ## Possible Solution
 <!--- Not obligatory, but suggest a fix/reason for the bug, -->
 
-- [ ] I've checked the [General Troubleshooting Guide](https://github.com/techno-tim/k3s-ansible/discussions/20)
+- [ ] I've checked the [General Troubleshooting Guide](https://github.com/timothystewart6/k3s-ansible/discussions/20)
15 .github/dependabot.yml vendored
@@ -9,3 +9,18 @@ updates:
     ignore:
       - dependency-name: "*"
         update-types: ["version-update:semver-major"]
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    rebase-strategy: "auto"
+
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    rebase-strategy: "auto"
+    ignore:
+      - dependency-name: "*"
+        update-types: ["version-update:semver-major"]
17 .github/download-boxes.sh vendored
@@ -9,12 +9,17 @@ set -euo pipefail
 GIT_ROOT=$(git rev-parse --show-toplevel)
 PROVIDER=virtualbox
 
-# Read all boxes for all platforms from the "molecule.yml" files
-all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
-  yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
-  grep --invert-match --regexp=--- | # Filter out file separators
-  sort |
-  uniq)
+yq --version
+
+# Define the path to the molecule.yml files
+MOLECULE_YML_PATH="${GIT_ROOT}/molecule/*/molecule.yml"
+
+# Extract and sort unique boxes from all molecule.yml files
+all_boxes=$(for file in $MOLECULE_YML_PATH; do
+  yq eval '.platforms[].box' "$file"
+done | sort -u)
+
+echo all_boxes: "$all_boxes"
 
 # Read the boxes that are currently present on the system (for the current provider)
 present_boxes=$(
42 .github/workflows/cache.yml vendored Normal file
@@ -0,0 +1,42 @@
+---
+name: "Cache"
+on:
+  workflow_call:
+jobs:
+  molecule:
+    name: cache
+    runs-on: self-hosted
+    env:
+      PYTHON_VERSION: "3.11"
+
+    steps:
+      - name: Check out the codebase
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Set up Python ${{ env.PYTHON_VERSION }}
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+          cache: 'pip' # caching pip dependencies
+
+      - name: Cache Vagrant boxes
+        id: cache-vagrant
+        uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # 4.1.2
+        with:
+          lookup-only: true # if it exists, we don't need to restore and can skip the next step
+          path: |
+            ~/.vagrant.d/boxes
+          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
+          restore-keys: |
+            vagrant-boxes
+
+      - name: Download Vagrant boxes for all scenarios
+        # To save some cache space, all scenarios share the same cache key.
+        # On the other hand, this means that the cache contents should be
+        # the same across all scenarios. This step ensures that.
+        if: steps.cache-vagrant.outputs.cache-hit != 'true' # only run if false since this is just a cache step
+        run: |
+          ./.github/download-boxes.sh
+          vagrant box list
22 .github/workflows/ci.yml vendored
@@ -2,14 +2,26 @@
 name: "CI"
 on:
   pull_request:
-  push:
-    branches:
-      - master
+    types:
+      - opened
+      - synchronize
     paths-ignore:
-      - '**/README.md'
+      - '**/.gitignore'
+      - '**/FUNDING.yml'
+      - '**/host.ini'
+      - '**/*.md'
+      - '**/.editorconfig'
+      - '**/ansible.example.cfg'
+      - '**/deploy.sh'
+      - '**/LICENSE'
+      - '**/reboot.sh'
+      - '**/reset.sh'
 jobs:
+  pre:
+    uses: ./.github/workflows/cache.yml
   lint:
     uses: ./.github/workflows/lint.yml
+    needs: [pre]
   test:
     uses: ./.github/workflows/test.yml
-    needs: [lint]
+    needs: [pre, lint]
36 .github/workflows/lint.yml vendored
@@ -5,37 +5,27 @@ on:
 jobs:
   pre-commit-ci:
     name: Pre-Commit
-    runs-on: ubuntu-latest
+    runs-on: self-hosted
     env:
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"
 
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}
 
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
 
-      - name: Cache pip
-        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
-        with:
-          path: ~/.cache/pip
-          key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-
-
-      - name: Cache Ansible
-        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
+      - name: Restore Ansible cache
+        uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # 4.1.2
         with:
           path: ~/.ansible/collections
-          key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
-          restore-keys: |
-            ${{ runner.os }}-ansible-
+          key: ansible-${{ hashFiles('collections/requirements.yml') }}
 
       - name: Install dependencies
         run: |
@@ -47,21 +37,17 @@ jobs:
           python3 -m pip install -r requirements.txt
           echo "::endgroup::"
 
-          echo "::group::Install Ansible role requirements from collections/requirements.yml"
-          ansible-galaxy install -r collections/requirements.yml
-          echo "::endgroup::"
-
       - name: Run pre-commit
-        uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
+        uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # 3.0.1
 
   ensure-pinned-actions:
     name: Ensure SHA Pinned Actions
-    runs-on: ubuntu-latest
+    runs-on: self-hosted
    steps:
       - name: Checkout code
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
       - name: Ensure SHA pinned actions
-        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@38608ef4fb69adae7f1eac6eeb88e67b7d083bfd # 3.0.16
         with:
           allowlist: |
             aws-actions/
103 .github/workflows/test.yml vendored
@@ -5,23 +5,51 @@ on:
 jobs:
   molecule:
     name: Molecule
-    runs-on: macos-12
+    runs-on: self-hosted
     strategy:
       matrix:
         scenario:
           - default
-          - ipv6
+          # - ipv6
           - single_node
+          - calico
+          - cilium
+          - kube-vip
       fail-fast: false
     env:
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"
 
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}
 
+      # these steps are necessary if not using ephemeral nodes
+      - name: Delete old Vagrant box versions
+        if: always() # do this even if a step before has failed
+        run: vagrant box prune --force
+
+      - name: Remove all local Vagrant boxes
+        if: always() # do this even if a step before has failed
+        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
+
+      - name: Remove all Virtualbox VMs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
+
+      - name: Remove all Virtualbox HDs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
+
+      - name: Remove all Virtualbox Networks
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
+
+      - name: Remove Virtualbox network config
+        if: always() # do this even if a step before has failed
+        run: sudo rm /etc/vbox/networks.conf || true
+
       - name: Configure VirtualBox
         run: |-
           sudo mkdir -p /etc/vbox
@@ -30,35 +58,19 @@ jobs:
           * fdad:bad:ba55::/64
           EOF
 
-      - name: Cache pip
-        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
-        with:
-          path: ~/.cache/pip
-          key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-
-
-      - name: Cache Vagrant boxes
-        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
-        with:
-          path: |
-            ~/.vagrant.d/boxes
-          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
-          restore-keys: |
-            vagrant-boxes
-
-      - name: Download Vagrant boxes for all scenarios
-        # To save some cache space, all scenarios share the same cache key.
-        # On the other hand, this means that the cache contents should be
-        # the same across all scenarios. This step ensures that.
-        run: ./.github/download-boxes.sh
-
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
 
+      - name: Restore vagrant Boxes cache
+        uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # 4.1.2
+        with:
+          path: ~/.vagrant.d/boxes
+          key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
+          fail-on-cache-miss: true
+
       - name: Install dependencies
         run: |
           echo "::group::Upgrade pip"
@@ -71,21 +83,44 @@ jobs:
 
       - name: Test with molecule
         run: molecule test --scenario-name ${{ matrix.scenario }}
+        timeout-minutes: 90
         env:
           ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
           ANSIBLE_SSH_RETRIES: 4
-          ANSIBLE_TIMEOUT: 60
+          ANSIBLE_TIMEOUT: 120
           PY_COLORS: 1
           ANSIBLE_FORCE_COLOR: 1
 
+      # these steps are necessary if not using ephemeral nodes
+      - name: Delete old Vagrant box versions
+        if: always() # do this even if a step before has failed
+        run: vagrant box prune --force
+
+      - name: Remove all local Vagrant boxes
+        if: always() # do this even if a step before has failed
+        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
+
+      - name: Remove all Virtualbox VMs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
+
+      - name: Remove all Virtualbox HDs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
+
+      - name: Remove all Virtualbox Networks
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
+
+      - name: Remove Virtualbox network config
+        if: always() # do this even if a step before has failed
+        run: sudo rm /etc/vbox/networks.conf || true
+
       - name: Upload log files
         if: always() # do this even if a step before has failed
-        uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # 4.4.3
         with:
           name: logs
           path: |
             ${{ runner.temp }}/logs
+          overwrite: true
-
-      - name: Delete old box versions
-        if: always() # do this even if a step before has failed
-        run: vagrant box prune --force
2 .gitignore vendored
@@ -1,2 +1,4 @@
 .env/
 *.log
+ansible.cfg
+kubeconfig
.pre-commit-config.yaml
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0 # v4.3.0
+    rev: v4.5.0
     hooks:
       - id: requirements-txt-fixer
       - id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
       - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
   - repo: https://github.com/adrienverge/yamllint.git
-    rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2 # v1.28.0
+    rev: v1.33.0
     hooks:
       - id: yamllint
        args: [-c=.yamllint]
   - repo: https://github.com/ansible-community/ansible-lint.git
-    rev: a058554b9bcf88f12ad09ab9fb93b267a214368f # v6.8.6
+    rev: v6.22.2
     hooks:
       - id: ansible-lint
   - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a # v0.8.0.4
+    rev: v0.9.0.6
     hooks:
       - id: shellcheck
   - repo: https://github.com/Lucas-C/pre-commit-hooks
-    rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9 # v1.3.1
+    rev: v1.5.4
     hooks:
       - id: remove-crlf
       - id: remove-tabs
   - repo: https://github.com/sirosen/texthooks
-    rev: 30d9af95631de0d7cff4e282bde9160d38bb0359 # 0.4.0
+    rev: 0.6.4
     hooks:
       - id: fix-smartquotes
13 .yamllint
@@ -2,8 +2,19 @@
 extends: default
 
 rules:
+  comments:
+    min-spaces-from-content: 1
+  comments-indentation: false
+  braces:
+    max-spaces-inside: 1
+  octal-values:
+    forbid-implicit-octal: true
+    forbid-explicit-octal: true
   line-length:
     max: 120
     level: warning
   truthy:
-    allowed-values: ['true', 'false', 'yes', 'no']
+    allowed-values: ["true", "false"]
+
+ignore:
+  - galaxy.yml
122 README.md
@@ -4,11 +4,11 @@
 
 This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`.
 
-This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.chipzoller.dev/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
+This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.io/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
 
 If you want more context on how this works, see:
 
-📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
+📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)
 
 📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
 
@@ -28,7 +28,7 @@ on processor architecture:
 
 ## ✅ System requirements
 
-- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- Control Node (the machine you are running `ansible` commands on) must have Ansible 2.11+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://technotim.live/posts/ansible-automation/).
 
 - You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
 
@@ -67,6 +67,8 @@ node
 
 If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).
 
+Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
+
 This requires at least k3s version `1.19.1` however the version is configurable by using the `k3s_version` variable.
 
 If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.
@@ -94,16 +96,102 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
 To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:
 
 ```bash
-scp debian@master_ip:~/.kube/config ~/.kube/config
+scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
 ```
+
+If you get a file Permission denied error, go into the node and temporarily run:
+
+```bash
+sudo chmod 777 /etc/rancher/k3s/k3s.yaml
+```
+
+Then copy with the scp command and reset the permissions back with:
+
+```bash
+sudo chmod 600 /etc/rancher/k3s/k3s.yaml
+```
+
+You'll then want to modify the config to point to the master IP by running:
+
+```bash
+sudo nano ~/.kube/config
+```
+
+Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
+
 ### 🔨 Testing your cluster
 
-See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
+See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
+
+### Variables
+
+| Role(s) | Variable | Type | Default | Required | Description |
+|---|---|---|---|---|---|
+| `download` | `k3s_version` | string | ❌ | Required | K3s binaries version |
+| `k3s_agent`, `k3s_server`, `k3s_server_post` | `apiserver_endpoint` | string | ❌ | Required | Virtual ip-address configured on each master |
+| `k3s_agent` | `extra_agent_args` | string | `null` | Not required | Extra arguments for agent nodes |
+| `k3s_agent`, `k3s_server` | `group_name_master` | string | `null` | Not required | Name of the master group |
+| `k3s_agent` | `k3s_token` | string | `null` | Not required | Token used to communicate between masters |
+| `k3s_agent`, `k3s_server` | `proxy_env` | dict | `null` | Not required | Internet proxy configurations |
+| `k3s_agent`, `k3s_server` | `proxy_env.HTTP_PROXY` | string | ❌ | Required | HTTP internet proxy |
+| `k3s_agent`, `k3s_server` | `proxy_env.HTTPS_PROXY` | string | ❌ | Required | HTTPS internet proxy |
+| `k3s_agent`, `k3s_server` | `proxy_env.NO_PROXY` | string | ❌ | Required | Addresses that will not use the proxies |
+| `k3s_agent`, `k3s_server`, `reset` | `systemd_dir` | string | `/etc/systemd/system` | Not required | Path to systemd services |
+| `k3s_custom_registries` | `custom_registries_yaml` | string | ❌ | Required | YAML block defining custom registries. The following is an example that pulls all images used in this playbook through your private registries. It also allows you to pull your own images from your private registry, without having to use imagePullSecrets in your deployments. If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images, you can just remove those from the mirrors: section. |
+| `k3s_server`, `k3s_server_post` | `cilium_bgp` | bool | `~` | Not required | Enable cilium BGP control plane for LB services and pod cidrs. Disables the use of MetalLB. |
+| `k3s_server`, `k3s_server_post` | `cilium_iface` | string | ❌ | Not required | The network interface used when Cilium is enabled |
+| `k3s_server` | `extra_server_args` | string | `""` | Not required | Extra arguments for server nodes |
+| `k3s_server` | `k3s_create_kubectl_symlink` | bool | `false` | Not required | Create the kubectl -> k3s symlink |
+| `k3s_server` | `k3s_create_crictl_symlink` | bool | `true` | Not required | Create the crictl -> k3s symlink |
+| `k3s_server` | `kube_vip_arp` | bool | `true` | Not required | Enables kube-vip ARP broadcasts |
+| `k3s_server` | `kube_vip_bgp` | bool | `false` | Not required | Enables kube-vip BGP peering |
+| `k3s_server` | `kube_vip_bgp_routerid` | string | `"127.0.0.1"` | Not required | Defines the router ID for the kube-vip BGP server |
+| `k3s_server` | `kube_vip_bgp_as` | string | `"64513"` | Not required | Defines the AS for the kube-vip BGP server |
+| `k3s_server` | `kube_vip_bgp_peeraddress` | string | `"192.168.30.1"` | Not required | Defines the address for the kube-vip BGP peer |
+| `k3s_server` | `kube_vip_bgp_peeras` | string | `"64512"` | Not required | Defines the AS for the kube-vip BGP peer |
+| `k3s_server` | `kube_vip_bgp_peers` | list | `[]` | Not required | List of BGP peer ASN & address pairs |
+| `k3s_server` | `kube_vip_bgp_peers_groups` | list | `['k3s_master']` | Not required | Inventory group in which to search for additional `kube_vip_bgp_peers` parameters to merge. |
+| `k3s_server` | `kube_vip_iface` | string | `~` | Not required | Explicitly define an interface that ALL control nodes should use to propagate the VIP. Otherwise, kube-vip will determine the right interface automatically at runtime. |
+| `k3s_server` | `kube_vip_tag_version` | string | `v0.7.2` | Not required | Image tag for kube-vip |
+| `k3s_server` | `kube_vip_cloud_provider_tag_version` | string | `main` | Not required | Tag for the kube-vip-cloud-provider manifest when enabled |
+| `k3s_server`, `k3s_server_post` | `kube_vip_lb_ip_range` | string | `~` | Not required | IP range for kube-vip load balancer |
+| `k3s_server`, `k3s_server_post` | `metal_lb_controller_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB |
+| `k3s_server` | `metal_lb_speaker_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB |
+| `k3s_server` | `metal_lb_type` | string | `native` | Not required | Use FRR mode or native. Valid values are `frr` and `native` |
+| `k3s_server` | `retry_count` | int | `20` | Not required | Amount of retries when verifying that nodes joined |
+| `k3s_server` | `server_init_args` | string | ❌ | Not required | Arguments for server nodes |
+| `k3s_server_post` | `bpf_lb_algorithm` | string | `maglev` | Not required | BPF lb algorithm |
+| `k3s_server_post` | `bpf_lb_mode` | string | `hybrid` | Not required | BPF lb mode |
+| `k3s_server_post` | `calico_blocksize` | int | `26` | Not required | IP pool block size |
+| `k3s_server_post` | `calico_ebpf` | bool | `false` | Not required | Use eBPF dataplane instead of iptables |
+| `k3s_server_post` | `calico_encapsulation` | string | `VXLANCrossSubnet` | Not required | IP pool encapsulation |
+| `k3s_server_post` | `calico_natOutgoing` | string | `Enabled` | Not required | IP pool NAT outgoing |
+| `k3s_server_post` | `calico_nodeSelector` | string | `all()` | Not required | IP pool node selector |
+| `k3s_server_post` | `calico_iface` | string | `~` | Not required | The network interface used when Calico is enabled |
+| `k3s_server_post` | `calico_tag` | string | `v3.27.2` | Not required | Calico version tag |
+| `k3s_server_post` | `cilium_bgp_my_asn` | int | `64513` | Not required | Local ASN for BGP peer |
+| `k3s_server_post` | `cilium_bgp_peer_asn` | int | `64512` | Not required | BGP peer ASN |
+| `k3s_server_post` | `cilium_bgp_peer_address` | string | `~` | Not required | BGP peer address |
+| `k3s_server_post` | `cilium_bgp_neighbors` | list | `[]` | Not required | List of BGP peer ASN & address pairs |
+| `k3s_server_post` | `cilium_bgp_neighbors_groups` | list | `['k3s_all']` | Not required | Inventory group in which to search for additional `cilium_bgp_neighbors` parameters to merge. |
+| `k3s_server_post` | `cilium_bgp_lb_cidr` | string | `192.168.31.0/24` | Not required | BGP load balancer IP range |
+| `k3s_server_post` | `cilium_exportPodCIDR` | bool | `true` | Not required | Export pod CIDR |
+| `k3s_server_post` | `cilium_hubble` | bool | `true` | Not required | Enable Cilium Hubble |
+| `k3s_server_post` | `cilium_mode` | string | `native` | Not required | Inner-node communication mode (choices are `native` and `routed`) |
+| `k3s_server_post` | `cluster_cidr` | string | `10.52.0.0/16` | Not required | Inner-cluster IP range |
+| `k3s_server_post` | `enable_bpf_masquerade` | bool | `true` | Not required | Use IP masquerading |
+| `k3s_server_post` | `kube_proxy_replacement` | bool | `true` | Not required | Replace the native kube-proxy with Cilium |
+| `k3s_server_post` | `metal_lb_available_timeout` | string | `240s` | Not required | Wait for MetalLB resources |
+| `k3s_server_post` | `metal_lb_ip_range` | string | `192.168.30.80-192.168.30.90` | Not required | MetalLB ip range for load balancer |
+| `k3s_server_post` | `metal_lb_controller_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB |
+| `k3s_server_post` | `metal_lb_mode` | string | `layer2` | Not required | Metallb mode (choices are `bgp` and `layer2`) |
+| `k3s_server_post` | `metal_lb_bgp_my_asn` | string | `~` | Not required | BGP ASN configurations |
+| `k3s_server_post` | `metal_lb_bgp_peer_asn` | string | `~` | Not required | BGP peer ASN configurations |
+| `k3s_server_post` | `metal_lb_bgp_peer_address` | string | `~` | Not required | BGP peer address |
+| `lxc` | `custom_reboot_command` | string | `~` | Not required | Command to run on reboot |
+| `prereq` | `system_timezone` | string | `null` | Not required | Timezone to be set on all nodes |
+| `proxmox_lxc`, `reset_proxmox_lxc` | `proxmox_lxc_ct_ids` | list | ❌ | Required | Proxmox container ID list |
+| `raspberrypi` | `state` | string | `present` | Not required | Indicates whether the k3s prerequisites for Raspberry Pi should be set up (possible values are `present` and `absent`) |
+
 ### Troubleshooting
 
-Be sure to see [this post](https://github.com/techno-tim/k3s-ansible/discussions/20) on how to troubleshoot common problems
+Be sure to see [this post](https://github.com/timothystewart6/k3s-ansible/discussions/20) on how to troubleshoot common problems
 
 ### Testing the playbook using molecule
 
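The kubeconfig steps the new README adds can be collapsed into one shell sequence. A minimal sketch, assuming a node reachable as debian@192.168.30.38 (a placeholder address), the default k3s paths from this diff, GNU sed, and the sample apiserver VIP 192.168.30.222; adjust all of these to your environment:

```bash
# temporarily loosen permissions so scp can read the kubeconfig (placeholder host)
ssh debian@192.168.30.38 'sudo chmod 777 /etc/rancher/k3s/k3s.yaml'
# copy the kubeconfig locally
scp debian@192.168.30.38:/etc/rancher/k3s/k3s.yaml ~/.kube/config
# restore the restrictive permissions on the node
ssh debian@192.168.30.38 'sudo chmod 600 /etc/rancher/k3s/k3s.yaml'
# point the local config at the apiserver VIP instead of the loopback address
sed -i 's|https://127.0.0.1:6443|https://192.168.30.222:6443|' ~/.kube/config
```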
@@ -116,6 +204,28 @@ You can find more information about it [here](molecule/README.md).
 
 This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
 
+## 🌌 Ansible Galaxy
+
+This collection can now be used in larger ansible projects.
+
+Instructions:
+
+- create or modify a file `collections/requirements.yml` in your project
+
+```yml
+collections:
+  - name: ansible.utils
+  - name: community.general
+  - name: ansible.posix
+  - name: kubernetes.core
+  - name: https://github.com/timothystewart6/k3s-ansible.git
+    type: git
+    version: master
+```
+
+- install via `ansible-galaxy collection install -r ./collections/requirements.yml`
+- every role is now available via the prefix `techno_tim.k3s_ansible.` e.g. `techno_tim.k3s_ansible.lxc`
+
 ## Thanks 🤝
 
 This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:
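To make the Galaxy usage above concrete, a minimal play that pulls one role through the collection prefix might look like the following. This is an illustrative sketch, not a file from the repo; the play name, host group, and choice of the `lxc` role are assumptions:

```yml
---
# hypothetical playbook in a project that consumes the collection
- name: Prepare k3s hosts using the techno_tim.k3s_ansible collection
  hosts: k3s_cluster
  become: true
  roles:
    - role: techno_tim.k3s_ansible.lxc  # any role in the collection is addressed the same way
```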
23 ansible.cfg
@@ -1,23 +0,0 @@
-[defaults]
-nocows = True
-roles_path = ./roles
-inventory = ./hosts.ini
-stdout_callback = yaml
-
-remote_tmp = $HOME/.ansible/tmp
-local_tmp = $HOME/.ansible/tmp
-timeout = 60
-host_key_checking = False
-deprecation_warnings = False
-callbacks_enabled = profile_tasks
-log_path = ./ansible.log
-
-[privilege_escalation]
-become = True
-
-[ssh_connection]
-scp_if_ssh = smart
-retries = 3
-ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s
-pipelining = True
-control_path = %(directory)s/%%h-%%r
2 ansible.example.cfg Normal file
@@ -0,0 +1,2 @@
+[defaults]
+inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file
deploy.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook site.yml
81 galaxy.yml Normal file
@@ -0,0 +1,81 @@
+### REQUIRED
+# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
+# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
+# underscores or numbers and cannot contain consecutive underscores
+namespace: techno_tim
+
+# The name of the collection. Has the same character restrictions as 'namespace'
+name: k3s_ansible
+
+# The version of the collection. Must be compatible with semantic versioning
+version: 1.0.0
+
+# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
+readme: README.md
+
+# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
+# @nicks:irc/im.site#channel'
+authors:
+  - your name <example@domain.com>
+
+
+### OPTIONAL but strongly recommended
+# A short summary description of the collection
+description: >
+  The easiest way to bootstrap a self-hosted High Availability Kubernetes
+  cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB,
+  and more.
+
+# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
+# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
+license:
+  - Apache-2.0
+
+
+# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
+# requirements as 'namespace' and 'name'
+tags:
+  - etcd
+  - high-availability
+  - k8s
+  - k3s
+  - k3s-cluster
+  - kube-vip
+  - kubernetes
+  - metallb
+  - rancher
+
+# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
+# collection label 'namespace.name'. The value is a version range
+# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
+# range specifiers can be set and are separated by ','
+dependencies:
+  ansible.utils: '*'
+  ansible.posix: '*'
+  community.general: '*'
+  kubernetes.core: '*'
+
+# The URL of the originating SCM repository
+repository: https://github.com/timothystewart6/k3s-ansible
+
+# The URL to any online docs
+documentation: https://github.com/timothystewart6/k3s-ansible
+
+# The URL to the homepage of the collection/project
+homepage: https://www.youtube.com/watch?v=CbkEWcUZ7zM
+
+# The URL to the collection issue tracker
+issues: https://github.com/timothystewart6/k3s-ansible/issues
+
+# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
+# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
+# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
+# and '.git' are always filtered. Mutually exclusive with 'manifest'
+build_ignore: []
+
+# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
+# list of MANIFEST.in style
+# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
+# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
+# with 'build_ignore'
+# manifest: null
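With galaxy.yml in place, the collection can be built and installed locally with the standard Galaxy tooling. The commands below are generic `ansible-galaxy` usage rather than scripts from this repo, and the artifact name simply follows from the namespace, name, and version declared above:

```bash
# build the collection artifact from the repo root (where galaxy.yml lives)
ansible-galaxy collection build
# install the resulting tarball for local testing
ansible-galaxy collection install techno_tim-k3s_ansible-1.0.0.tar.gz
```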
@@ -1,51 +1,183 @@
|
|||||||
---
|
---
|
||||||
k3s_version: v1.24.9+k3s1
|
k3s_version: v1.30.2+k3s2
|
||||||
# this is the user that has ssh access to these machines
|
# this is the user that has ssh access to these machines
|
||||||
ansible_user: ansibleuser
|
ansible_user: ansibleuser
|
||||||
systemd_dir: /etc/systemd/system
|
systemd_dir: /etc/systemd/system
|
||||||
|
|
||||||
# Set your timezone
|
# Set your timezone
|
||||||
system_timezone: "Your/Timezone"
|
system_timezone: Your/Timezone
|
||||||
|
|
||||||
# interface which will be used for flannel
|
# interface which will be used for flannel
|
||||||
flannel_iface: "eth0"
|
flannel_iface: eth0
|
||||||
|
|
||||||
|
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
|
||||||
|
# calico_iface: "eth0"
|
||||||
|
calico_ebpf: false # use eBPF dataplane instead of iptables
|
||||||
|
calico_tag: v3.28.0 # calico version tag
|
||||||
|
|
||||||
|
# uncomment cilium_iface to use cilium cni instead of flannel or calico
|
||||||
|
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
|
||||||
|
# cilium_iface: "eth0"
|
||||||
|
cilium_mode: native # native when nodes on same subnet or using bgp, else set routed
|
||||||
|
cilium_tag: v1.16.0 # cilium version tag
|
||||||
|
cilium_hubble: true # enable hubble observability relay and ui
|
||||||
|
|
||||||
|
# if using calico or cilium, you may specify the cluster pod cidr pool
|
||||||
|
cluster_cidr: 10.52.0.0/16
|
||||||
|
|
||||||
|
# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
|
||||||
|
cilium_bgp: false
|
||||||
|
|
||||||
|
# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
|
||||||
|
cilium_bgp_my_asn: "64513"
|
||||||
|
cilium_bgp_peer_asn: "64512"
|
||||||
|
cilium_bgp_peer_address: 192.168.30.1
|
||||||
|
cilium_bgp_lb_cidr: 192.168.31.0/24 # cidr for cilium loadbalancer ipam
|
||||||
|
|
||||||
|
# enable kube-vip ARP broadcasts
|
||||||
|
kube_vip_arp: true
|
||||||
|
|
||||||
|
# enable kube-vip BGP peering
|
||||||
|
kube_vip_bgp: false
|
||||||
|
|
||||||
|
# bgp parameters for kube-vip
|
||||||
|
kube_vip_bgp_routerid: "127.0.0.1" # Defines the router ID for the BGP server
|
||||||
|
kube_vip_bgp_as: "64513" # Defines the AS for the BGP server
|
||||||
|
kube_vip_bgp_peeraddress: "192.168.30.1" # Defines the address for the BGP peer
|
||||||
|
kube_vip_bgp_peeras: "64512" # Defines the AS for the BGP peer
|
||||||
|
|
||||||
# apiserver_endpoint is virtual ip-address which will be configured on each master
|
# apiserver_endpoint is virtual ip-address which will be configured on each master
|
||||||
apiserver_endpoint: "192.168.30.222"
|
apiserver_endpoint: 192.168.30.222
|
||||||
|
|
||||||
# k3s_token is required masters can talk together securely
|
# k3s_token is required masters can talk together securely
|
||||||
# this token should be alpha numeric only
|
# this token should be alpha numeric only
|
||||||
k3s_token: "some-SUPER-DEDEUPER-secret-password"
|
k3s_token: some-SUPER-DEDEUPER-secret-password
|
||||||
|
|
||||||
# The IP on which the node is reachable in the cluster.
|
# The IP on which the node is reachable in the cluster.
|
||||||
# Here, a sensible default is provided, you can still override
|
# Here, a sensible default is provided, you can still override
|
||||||
# it for each of your hosts, though.
|
# it for each of your hosts, though.
|
||||||
k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
|
k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}"
|
||||||
|
|
||||||
# Disable the taint manually by setting: k3s_master_taint = false
|
# Disable the taint manually by setting: k3s_master_taint = false
|
||||||
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
|
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
|
||||||
|
|
||||||
# these arguments are recommended for servers as well as agents:
|
# these arguments are recommended for servers as well as agents:
|
||||||
extra_args: >-
|
extra_args: >-
|
||||||
--flannel-iface={{ flannel_iface }}
|
{{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
|
||||||
--node-ip={{ k3s_node_ip }}
|
--node-ip={{ k3s_node_ip }}
|
||||||
|
|
||||||
# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
|
# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
|
||||||
|
# the contents of the if block is also required if using calico or cilium
|
||||||
extra_server_args: >-
|
extra_server_args: >-
|
||||||
{{ extra_args }}
|
{{ extra_args }}
|
||||||
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
|
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
|
||||||
|
{% if calico_iface is defined or cilium_iface is defined %}
|
||||||
|
--flannel-backend=none
|
||||||
|
--disable-network-policy
|
||||||
|
--cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
|
||||||
|
{% endif %}
|
||||||
--tls-san {{ apiserver_endpoint }}
|
--tls-san {{ apiserver_endpoint }}
|
||||||
--disable servicelb
|
--disable servicelb
|
||||||
--disable traefik
|
--disable traefik
|
||||||
|
|
||||||
extra_agent_args: >-
|
extra_agent_args: >-
|
||||||
{{ extra_args }}
|
{{ extra_args }}
|
||||||
|
|
||||||
# image tag for kube-vip
|
# image tag for kube-vip
|
||||||
kube_vip_tag_version: "v0.5.7"
|
kube_vip_tag_version: v0.8.2
|
||||||
|
|
||||||
|
# tag for kube-vip-cloud-provider manifest
|
||||||
|
# kube_vip_cloud_provider_tag_version: "main"
|
||||||
|
|
||||||
|
# kube-vip ip range for load balancer
|
||||||
|
# (uncomment to use kube-vip for services instead of MetalLB)
|
||||||
|
# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
[inventory/sample/group_vars/all.yml — filename inferred from context]
+# metallb type frr or native
+metal_lb_type: native
+
+# metallb mode layer2 or bgp
+metal_lb_mode: layer2
+
+# bgp options
+# metal_lb_bgp_my_asn: "64513"
+# metal_lb_bgp_peer_asn: "64512"
+# metal_lb_bgp_peer_address: "192.168.30.1"

 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.13.7"
-metal_lb_controller_tag_version: "v0.13.7"
+metal_lb_speaker_tag_version: v0.14.8
+metal_lb_controller_tag_version: v0.14.8

 # metallb ip range for load balancer
-metal_lb_ip_range: "192.168.30.80-192.168.30.90"
+metal_lb_ip_range: 192.168.30.80-192.168.30.90
+
+# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
+# in your hosts.ini file.
+# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
+# Most notably, your containers must be privileged, and must not have nesting set to true.
+# Please note this script disables most of the security of lxc containers, with the trade-off being that lxc
+# containers are significantly more resource efficient compared to full VMs.
+# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
+# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
+# VMs would use a significant portion of your available resources.
+proxmox_lxc_configure: false
+# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
+# set this value to some-user
+proxmox_lxc_ssh_user: root
+# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes
+proxmox_lxc_ct_ids:
+  - 200
+  - 201
+  - 202
+  - 203
+  - 204
+
+# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
+# (harbor / nexus / docker's official registry / etc).
+# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
+# or air-gapped environments where your nodes don't have internet access after the initial setup
+# (which is still needed for downloading the k3s binary and such).
+# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
+custom_registries: false
+# The registries can be authenticated or anonymous, depending on your registry server configuration.
+# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
+#   configs:
+#     "registry.domain.com":
+#       auth:
+#         username: yourusername
+#         password: yourpassword
+# The following is an example that pulls all images used in this playbook through your private registries.
+# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
+# in your deployments.
+# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
+# you can just remove those from the mirrors: section.
+custom_registries_yaml: |
+  mirrors:
+    docker.io:
+      endpoint:
+        - "https://registry.domain.com/v2/dockerhub"
+    quay.io:
+      endpoint:
+        - "https://registry.domain.com/v2/quayio"
+    ghcr.io:
+      endpoint:
+        - "https://registry.domain.com/v2/ghcrio"
+    registry.domain.com:
+      endpoint:
+        - "https://registry.domain.com"
+
+  configs:
+    "registry.domain.com":
+      auth:
+        username: yourusername
+        password: yourpassword
+
+# On some distros like Diet Pi, there is no dbus installed. dbus is required by the default reboot command.
+# Uncomment if you need a custom reboot command
+# custom_reboot_command: /usr/sbin/shutdown -r now
+
+# Only enable and configure these if you access the internet through a proxy
+# proxy_env:
+#   HTTP_PROXY: "http://proxy.domain.local:3128"
+#   HTTPS_PROXY: "http://proxy.domain.local:3128"
+#   NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
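The new MetalLB variables at the top of this hunk compose as follows — a minimal sketch of switching to BGP mode, assuming a router at 192.168.30.1; the ASNs and addresses simply mirror the commented defaults and are illustrative only:

  metal_lb_type: native
  metal_lb_mode: bgp
  metal_lb_bgp_my_asn: "64513"
  metal_lb_bgp_peer_asn: "64512"
  metal_lb_bgp_peer_address: "192.168.30.1"
  metal_lb_ip_range: 192.168.30.80-192.168.30.90

In layer2 mode (the default above) the three metal_lb_bgp_* variables stay commented out and are ignored.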
inventory/sample/group_vars/proxmox.yml (new file, +2)
@@ -0,0 +1,2 @@
+---
+ansible_user: "{{ proxmox_lxc_ssh_user }}"
[inventory/sample/hosts.ini — filename inferred]
@@ -7,6 +7,11 @@
 192.168.30.41
 192.168.30.42
+
+# only required if proxmox_lxc_configure: true
+# must contain all proxmox instances that have a master or worker node
+# [proxmox]
+# 192.168.30.43

 [k3s_cluster:children]
 master
 node
[molecule/README.md — filename inferred; scenario list]
@@ -13,6 +13,12 @@ We have these scenarios:
   To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
 - **single_node**:
   Very similar to the default scenario, but uses only a single node for all cluster functionality.
+- **calico**:
+  The same as single node, but uses calico cni instead of flannel.
+- **cilium**:
+  The same as single node, but uses cilium cni instead of flannel.
+- **kube-vip**:
+  The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB.

 ## How to execute
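Each directory under molecule/ is a Molecule scenario selectable by name with the standard -s flag, so the new scenarios can be exercised individually, for example:

  molecule test -s calico
  molecule test -s cilium
  molecule test -s kube-vip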
molecule/calico/molecule.yml (new file, +49)
@@ -0,0 +1,49 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: vagrant
+platforms:
+  - name: control1
+    box: generic/ubuntu2204
+    memory: 4096
+    cpus: 4
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: vagrant
+      ssh.password: vagrant
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: 192.168.30.62
+provisioner:
+  name: ansible
+  env:
+    ANSIBLE_VERBOSITY: 1
+  playbooks:
+    converge: ../resources/converge.yml
+    side_effect: ../resources/reset.yml
+    verify: ../resources/verify.yml
+  inventory:
+    links:
+      group_vars: ../../inventory/sample/group_vars
+scenario:
+  test_sequence:
+    - dependency
+    - cleanup
+    - destroy
+    - syntax
+    - create
+    - prepare
+    - converge
+    # idempotence is not possible with the playbook in its current form.
+    - verify
+    # We are repurposing side_effect here to test the reset playbook.
+    # This is why we do not run it before verify (which tests the cluster),
+    # but after the verify step.
+    - side_effect
+    - cleanup
+    - destroy
molecule/calico/overrides.yml (new file, +16)
@@ -0,0 +1,16 @@
+---
+- name: Apply overrides
+  hosts: all
+  tasks:
+    - name: Override host variables
+      ansible.builtin.set_fact:
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
+        calico_iface: eth1
+
+        # The test VMs might be a bit slow, so we give them more time to join the cluster:
+        retry_count: 45
+
+        # Make sure that our IP ranges do not collide with those of the other scenarios
+        apiserver_endpoint: 192.168.30.224
+        metal_lb_ip_range: 192.168.30.100-192.168.30.109
molecule/cilium/molecule.yml (new file, +49)
@@ -0,0 +1,49 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: vagrant
+platforms:
+  - name: control1
+    box: generic/ubuntu2204
+    memory: 4096
+    cpus: 4
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: vagrant
+      ssh.password: vagrant
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: 192.168.30.63
+provisioner:
+  name: ansible
+  env:
+    ANSIBLE_VERBOSITY: 1
+  playbooks:
+    converge: ../resources/converge.yml
+    side_effect: ../resources/reset.yml
+    verify: ../resources/verify.yml
+  inventory:
+    links:
+      group_vars: ../../inventory/sample/group_vars
+scenario:
+  test_sequence:
+    - dependency
+    - cleanup
+    - destroy
+    - syntax
+    - create
+    - prepare
+    - converge
+    # idempotence is not possible with the playbook in its current form.
+    - verify
+    # We are repurposing side_effect here to test the reset playbook.
+    # This is why we do not run it before verify (which tests the cluster),
+    # but after the verify step.
+    - side_effect
+    - cleanup
+    - destroy
molecule/cilium/overrides.yml (new file, +16)
@@ -0,0 +1,16 @@
+---
+- name: Apply overrides
+  hosts: all
+  tasks:
+    - name: Override host variables
+      ansible.builtin.set_fact:
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
+        cilium_iface: eth1
+
+        # The test VMs might be a bit slow, so we give them more time to join the cluster:
+        retry_count: 45
+
+        # Make sure that our IP ranges do not collide with those of the other scenarios
+        apiserver_endpoint: 192.168.30.225
+        metal_lb_ip_range: 192.168.30.110-192.168.30.119
[molecule/default/molecule.yml — filename inferred]
@@ -4,10 +4,9 @@ dependency:
 driver:
   name: vagrant
 platforms:
-
   - name: control1
     box: generic/ubuntu2204
-    memory: 2048
+    memory: 1024
     cpus: 2
     groups:
       - k3s_cluster
@@ -18,12 +17,12 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant

   - name: control2
-    box: generic/debian11
-    memory: 2048
+    box: generic/debian12
+    memory: 1024
     cpus: 2
     groups:
       - k3s_cluster
@@ -34,7 +33,7 @@ platforms:

   - name: control3
     box: generic/rocky9
-    memory: 2048
+    memory: 1024
     cpus: 2
     groups:
       - k3s_cluster
@@ -45,7 +44,7 @@ platforms:

   - name: node1
     box: generic/ubuntu2204
-    memory: 2048
+    memory: 1024
     cpus: 2
     groups:
       - k3s_cluster
@@ -56,12 +55,12 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant

   - name: node2
     box: generic/rocky9
-    memory: 2048
+    memory: 1024
     cpus: 2
     groups:
       - k3s_cluster
@@ -72,6 +71,8 @@ platforms:

 provisioner:
   name: ansible
+  env:
+    ANSIBLE_VERBOSITY: 1
   playbooks:
     converge: ../resources/converge.yml
     side_effect: ../resources/reset.yml
@@ -82,7 +83,6 @@ provisioner:
 scenario:
   test_sequence:
     - dependency
-    - lint
     - cleanup
     - destroy
     - syntax
[molecule/default/overrides.yml — filename inferred]
@@ -4,7 +4,8 @@
   tasks:
     - name: Override host variables
       ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
         flannel_iface: eth1

         # The test VMs might be a bit slow, so we give them more time to join the cluster:
[a molecule prepare.yml — filename inferred]
@@ -17,6 +17,6 @@
       # and security needs.
       ansible.builtin.systemd:
         name: firewalld
-        enabled: no
+        enabled: false
         state: stopped
       become: true
molecule/ipv6/host_vars/control2.yml (new file, +3)
@@ -0,0 +1,3 @@
+---
+node_ipv4: 192.168.123.12
+node_ipv6: fdad:bad:ba55::de:12
[molecule/ipv6/molecule.yml — filename inferred]
@@ -4,10 +4,9 @@ dependency:
 driver:
   name: vagrant
 platforms:
-
   - name: control1
     box: generic/ubuntu2204
-    memory: 2048
+    memory: 1024
     cpus: 2
     groups:
       - k3s_cluster
@@ -18,12 +17,28 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
+
+  - name: control2
+    box: generic/ubuntu2204
+    memory: 1024
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: fdad:bad:ba55::de:12
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: vagrant
+      ssh.password: vagrant

   - name: node1
     box: generic/ubuntu2204
-    memory: 2048
+    memory: 1024
     cpus: 2
     groups:
       - k3s_cluster
@@ -34,10 +49,12 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
 provisioner:
   name: ansible
+  env:
+    ANSIBLE_VERBOSITY: 1
   playbooks:
     converge: ../resources/converge.yml
     side_effect: ../resources/reset.yml
@@ -48,7 +65,6 @@ provisioner:
 scenario:
   test_sequence:
     - dependency
-    - lint
     - cleanup
     - destroy
     - syntax
[molecule/ipv6/overrides.yml — filename inferred]
@@ -4,9 +4,15 @@
   tasks:
     - name: Override host variables (1/2)
       ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
         flannel_iface: eth1
+
+        # In this scenario, we have multiple interfaces that the VIP could be
+        # broadcasted on. Since we have assigned a dedicated private network
+        # here, let's make sure that it is used.
+        kube_vip_iface: eth1

         # The test VMs might be a bit slow, so we give them more time to join the cluster:
         retry_count: 45
[molecule/ipv6/prepare.yml — filename inferred]
@@ -38,7 +38,7 @@
       dest: /etc/netplan/55-flannel-ipv4.yaml
       owner: root
       group: root
-      mode: 0644
+      mode: "0644"
   register: netplan_template

 - name: Apply netplan configuration
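The quoting change above is behavioral, not cosmetic: YAML 1.1 parses an unquoted scalar with a leading zero as an octal integer, so the mode only reaches Ansible as written when quoted. A minimal illustration:

  mode: 0644    # YAML hands Ansible the integer 420
  mode: "0644"  # YAML hands Ansible the string "0644", which Ansible reads as octal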
molecule/kube-vip/molecule.yml (new file, +49)
@@ -0,0 +1,49 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: vagrant
+platforms:
+  - name: control1
+    box: generic/ubuntu2204
+    memory: 4096
+    cpus: 4
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: vagrant
+      ssh.password: vagrant
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: 192.168.30.62
+provisioner:
+  name: ansible
+  env:
+    ANSIBLE_VERBOSITY: 1
+  playbooks:
+    converge: ../resources/converge.yml
+    side_effect: ../resources/reset.yml
+    verify: ../resources/verify.yml
+  inventory:
+    links:
+      group_vars: ../../inventory/sample/group_vars
+scenario:
+  test_sequence:
+    - dependency
+    - cleanup
+    - destroy
+    - syntax
+    - create
+    - prepare
+    - converge
+    # idempotence is not possible with the playbook in its current form.
+    - verify
+    # We are repurposing side_effect here to test the reset playbook.
+    # This is why we do not run it before verify (which tests the cluster),
+    # but after the verify step.
+    - side_effect
+    - cleanup
+    - destroy
molecule/kube-vip/overrides.yml (new file, +17)
@@ -0,0 +1,17 @@
+---
+- name: Apply overrides
+  hosts: all
+  tasks:
+    - name: Override host variables
+      ansible.builtin.set_fact:
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
+        flannel_iface: eth1
+
+        # The test VMs might be a bit slow, so we give them more time to join the cluster:
+        retry_count: 45
+
+        # Make sure that our IP ranges do not collide with those of the other scenarios
+        apiserver_endpoint: 192.168.30.225
+        # Use kube-vip instead of MetalLB
+        kube_vip_lb_ip_range: 192.168.30.110-192.168.30.119
[molecule verify playbook — filename inferred]
@@ -2,4 +2,4 @@
 - name: Verify
   hosts: all
   roles:
-    - verify/from_outside
+    - verify_from_outside
[verify_from_outside role defaults — filename inferred]
@@ -6,4 +6,4 @@ outside_host: localhost
 testing_namespace: molecule-verify-from-outside

 # The directory in which the example manifests reside
-example_manifests_path: ../../../../example
+example_manifests_path: ../../../example
[verify_from_outside role tasks — filename inferred]
@@ -27,21 +27,21 @@
     name: nginx
     namespace: "{{ testing_namespace }}"
     kubeconfig: "{{ kubecfg_path }}"
-  vars: &load_balancer_metadata
+  vars:
     metallb_ip: status.loadBalancer.ingress[0].ip
     metallb_port: spec.ports[0].port
   register: nginx_services

 - name: Assert that the nginx welcome page is available
   ansible.builtin.uri:
-    url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
-    return_content: yes
+    url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
+    return_content: true
   register: result
   failed_when: "'Welcome to nginx!' not in result.content"
   vars:
     ip: >-
       {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
-    port: >-
+    port_: >-
       {{ nginx_services.resources[0].spec.ports[0].port }}
   # Deactivated linter rules:
   # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
@@ -9,7 +9,7 @@
   ansible.builtin.assert:
     that: found_nodes == expected_nodes
     success_msg: "Found nodes as expected: {{ found_nodes }}"
-    fail_msg: "Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}"
+    fail_msg: Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}
   vars:
     found_nodes: >-
       {{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }}
[molecule/single_node/molecule.yml — filename inferred]
@@ -11,8 +11,8 @@ platforms:
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
+      ssh.username: vagrant
+      ssh.password: vagrant
     groups:
       - k3s_cluster
       - master
@@ -21,6 +21,8 @@ platforms:
         ip: 192.168.30.50
 provisioner:
   name: ansible
+  env:
+    ANSIBLE_VERBOSITY: 1
   playbooks:
     converge: ../resources/converge.yml
     side_effect: ../resources/reset.yml
@@ -31,7 +33,6 @@ provisioner:
 scenario:
   test_sequence:
     - dependency
-    - lint
     - cleanup
     - destroy
     - syntax
[molecule/single_node/overrides.yml — filename inferred]
@@ -4,12 +4,13 @@
   tasks:
     - name: Override host variables
       ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
         flannel_iface: eth1

         # The test VMs might be a bit slow, so we give them more time to join the cluster:
         retry_count: 45

         # Make sure that our IP ranges do not collide with those of the default scenario
-        apiserver_endpoint: "192.168.30.223"
-        metal_lb_ip_range: "192.168.30.91-192.168.30.99"
+        apiserver_endpoint: 192.168.30.223
+        metal_lb_ip_range: 192.168.30.91-192.168.30.99
reboot.sh (normal file → executable file)
@@ -1,3 +1,3 @@
 #!/bin/bash

-ansible-playbook reboot.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook reboot.yml
[reboot.yml — filename inferred]
@@ -1,9 +1,10 @@
 ---
 - name: Reboot k3s_cluster
   hosts: k3s_cluster
-  gather_facts: yes
-  become: yes
+  gather_facts: true
   tasks:
     - name: Reboot the nodes (and Wait upto 5 mins max)
-      reboot:
+      become: true
+      ansible.builtin.reboot:
+        reboot_command: "{{ custom_reboot_command | default(omit) }}"
         reboot_timeout: 300
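The default(omit) filter is what keeps this task valid on hosts that never set the variable: when custom_reboot_command is undefined, the reboot_command argument is dropped entirely and ansible.builtin.reboot falls back to its built-in command. The override itself is one line of group_vars, matching the commented example earlier in this diff:

  # only needed on distros without dbus, e.g. Diet Pi
  custom_reboot_command: /usr/sbin/shutdown -r now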
[requirements.in — filename inferred]
@@ -1,12 +1,10 @@
-ansible-core>=2.13.5
-ansible-lint>=6.8.6
+ansible-core>=2.16.2
 jmespath>=1.0.1
-jsonpatch>=1.32
-kubernetes>=25.3.0
-molecule-vagrant>=1.0.0
-molecule>=4.0.3
-netaddr>=0.8.0
-pre-commit>=2.20.0
-pre-commit-hooks>=1.3.1
-pyyaml>=6.0
-yamllint>=1.28.0
+jsonpatch>=1.33
+kubernetes>=29.0.0
+molecule-plugins[vagrant]
+molecule>=6.0.3
+netaddr>=0.10.1
+pre-commit>=3.6.0
+pre-commit-hooks>=4.5.0
+pyyaml>=6.0.1
requirements.txt (199 lines changed)
@@ -1,210 +1,169 @@
 #
-# This file is autogenerated by pip-compile with python 3.8
-# To update, run:
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
 #
 #    pip-compile requirements.in
 #
-ansible-compat==2.2.4
-#   via
-#     ansible-lint
-#     molecule
-ansible-core==2.14.1
+ansible-compat==4.1.11
+#   via molecule
+ansible-core==2.18.0
 #   via
 #     -r requirements.in
-#     ansible-lint
-ansible-lint==6.8.6
-#   via -r requirements.in
-arrow==1.2.3
-#   via jinja2-time
-attrs==22.1.0
-#   via jsonschema
-binaryornot==0.4.4
-#   via cookiecutter
-black==22.10.0
-#   via ansible-lint
-bracex==2.3.post1
+#     ansible-compat
+#     molecule
+attrs==23.2.0
+#   via
+#     jsonschema
+#     referencing
+bracex==2.4
 #   via wcmatch
-cachetools==5.2.0
+cachetools==5.3.2
 #   via google-auth
-certifi==2022.9.24
+certifi==2023.11.17
 #   via
 #     kubernetes
 #     requests
-cffi==1.15.1
+cffi==1.16.0
 #   via cryptography
-cfgv==3.3.1
+cfgv==3.4.0
 #   via pre-commit
-chardet==5.0.0
-#   via binaryornot
-charset-normalizer==2.1.1
+charset-normalizer==3.3.2
 #   via requests
-click==8.1.3
+click==8.1.7
 #   via
-#     black
 #     click-help-colors
-#     cookiecutter
 #     molecule
-click-help-colors==0.9.1
+click-help-colors==0.9.4
 #   via molecule
-commonmark==0.9.1
-#   via rich
-cookiecutter==2.1.1
-#   via molecule
-cryptography==38.0.3
+cryptography==41.0.7
 #   via ansible-core
-distlib==0.3.6
+distlib==0.3.8
 #   via virtualenv
-distro==1.8.0
-#   via selinux
 enrich==1.2.7
 #   via molecule
-filelock==3.8.0
-#   via
-#     ansible-lint
-#     virtualenv
-google-auth==2.14.0
+filelock==3.13.1
+#   via virtualenv
+google-auth==2.26.2
 #   via kubernetes
-identify==2.5.8
+identify==2.5.33
 #   via pre-commit
-idna==3.4
+idna==3.6
 #   via requests
-jinja2==3.1.2
+jinja2==3.1.3
 #   via
 #     ansible-core
-#     cookiecutter
-#     jinja2-time
 #     molecule
-#     molecule-vagrant
-jinja2-time==0.2.0
-#   via cookiecutter
 jmespath==1.0.1
 #   via -r requirements.in
-jsonpatch==1.32
+jsonpatch==1.33
 #   via -r requirements.in
-jsonpointer==2.3
+jsonpointer==2.4
 #   via jsonpatch
-jsonschema==4.17.0
+jsonschema==4.21.1
 #   via
 #     ansible-compat
-#     ansible-lint
 #     molecule
-kubernetes==25.3.0
+jsonschema-specifications==2023.12.1
+#   via jsonschema
+kubernetes==29.0.0
 #   via -r requirements.in
-markupsafe==2.1.1
+markdown-it-py==3.0.0
+#   via rich
+markupsafe==2.1.4
 #   via jinja2
-molecule==4.0.4
+mdurl==0.1.2
+#   via markdown-it-py
+molecule==6.0.3
 #   via
 #     -r requirements.in
-#     molecule-vagrant
-molecule-vagrant==1.0.0
+#     molecule-plugins
+molecule-plugins[vagrant]==23.5.3
 #   via -r requirements.in
-mypy-extensions==0.4.3
-#   via black
-netaddr==0.8.0
+netaddr==0.10.1
 #   via -r requirements.in
-nodeenv==1.7.0
+nodeenv==1.8.0
 #   via pre-commit
 oauthlib==3.2.2
-#   via requests-oauthlib
-packaging==21.3
+#   via
+#     kubernetes
+#     requests-oauthlib
+packaging==23.2
 #   via
 #     ansible-compat
 #     ansible-core
-#     ansible-lint
 #     molecule
-pathspec==0.10.1
-#   via
-#     black
-#     yamllint
-platformdirs==2.5.2
-#   via
-#     black
-#     virtualenv
-pluggy==1.0.0
+platformdirs==4.1.0
+#   via virtualenv
+pluggy==1.3.0
 #   via molecule
-pre-commit==2.21.0
+pre-commit==3.8.0
 #   via -r requirements.in
-pre-commit-hooks==4.4.0
+pre-commit-hooks==4.6.0
 #   via -r requirements.in
-pyasn1==0.4.8
+pyasn1==0.5.1
 #   via
 #     pyasn1-modules
 #     rsa
-pyasn1-modules==0.2.8
+pyasn1-modules==0.3.0
 #   via google-auth
 pycparser==2.21
 #   via cffi
-pygments==2.13.0
+pygments==2.17.2
 #   via rich
-pyparsing==3.0.9
-#   via packaging
-pyrsistent==0.19.2
-#   via jsonschema
 python-dateutil==2.8.2
-#   via
-#     arrow
-#     kubernetes
-python-slugify==6.1.2
-#   via cookiecutter
+#   via kubernetes
 python-vagrant==1.0.0
-#   via molecule-vagrant
-pyyaml==6.0
+#   via molecule-plugins
+pyyaml==6.0.2
 #   via
 #     -r requirements.in
 #     ansible-compat
 #     ansible-core
-#     ansible-lint
-#     cookiecutter
 #     kubernetes
 #     molecule
-#     molecule-vagrant
 #     pre-commit
-#     yamllint
-requests==2.28.1
+referencing==0.32.1
+#   via
+#     jsonschema
+#     jsonschema-specifications
+requests==2.31.0
 #   via
-#     cookiecutter
 #     kubernetes
 #     requests-oauthlib
 requests-oauthlib==1.3.1
 #   via kubernetes
-resolvelib==0.8.1
+resolvelib==1.0.1
 #   via ansible-core
-rich==12.6.0
+rich==13.7.0
 #   via
-#     ansible-lint
 #     enrich
 #     molecule
+rpds-py==0.17.1
+#   via
+#     jsonschema
+#     referencing
 rsa==4.9
 #   via google-auth
-ruamel-yaml==0.17.21
-#   via
-#     ansible-lint
-#     pre-commit-hooks
-selinux==0.2.1
-#   via molecule-vagrant
+ruamel-yaml==0.18.5
+#   via pre-commit-hooks
+ruamel-yaml-clib==0.2.8
+#   via ruamel-yaml
 six==1.16.0
 #   via
-#     google-auth
 #     kubernetes
 #     python-dateutil
-subprocess-tee==0.3.5
+subprocess-tee==0.4.1
 #   via ansible-compat
-text-unidecode==1.3
-#   via python-slugify
-urllib3==1.26.12
+urllib3==2.1.0
 #   via
 #     kubernetes
 #     requests
-virtualenv==20.16.6
+virtualenv==20.25.0
 #   via pre-commit
-wcmatch==8.4.1
-#   via ansible-lint
-websocket-client==1.4.2
+wcmatch==8.5
+#   via molecule
+websocket-client==1.7.0
 #   via kubernetes
-yamllint==1.29.0
-#   via
-#     -r requirements.in
-#     ansible-lint

 # The following packages are considered to be unsafe in a requirements file:
 # setuptools
reset.sh (2 lines changed)
@@ -1,3 +1,3 @@
 #!/bin/bash

-ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook reset.yml
reset.yml (24 lines changed)
@@ -1,13 +1,25 @@
 ---
-- hosts: k3s_cluster
-  gather_facts: yes
-  become: yes
+- name: Reset k3s cluster
+  hosts: k3s_cluster
+  gather_facts: true
   roles:
     - role: reset
+      become: true
     - role: raspberrypi
-      vars: {state: absent}
+      become: true
+      vars: { state: absent }
   post_tasks:
     - name: Reboot and wait for node to come back up
-      reboot:
+      become: true
+      ansible.builtin.reboot:
+        reboot_command: "{{ custom_reboot_command | default(omit) }}"
         reboot_timeout: 3600
+
+- name: Revert changes to Proxmox cluster
+  hosts: proxmox
+  gather_facts: true
+  become: true
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: reset_proxmox_lxc
+      when: proxmox_lxc_configure
roles/download/meta/main.yml (new file, +8)
@@ -0,0 +1,8 @@
+---
+argument_specs:
+  main:
+    short_description: Manage the downloading of K3S binaries
+    options:
+      k3s_version:
+        description: The desired version of K3S
+        required: true
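With an argument_specs block in meta/main.yml, ansible-core (2.11+) prepends an automatic validation task to the role, so a play that omits the required variable fails before any download is attempted. A hypothetical invocation (the version string is illustrative only):

  - hosts: k3s_cluster
    roles:
      - role: download
        vars:
          k3s_version: v1.29.2+k3s1  # required: true in the spec above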
[roles/download/tasks/main.yml — filename inferred]
@@ -1,36 +1,34 @@
 ---
-
 - name: Download k3s binary x64
-  get_url:
+  ansible.builtin.get_url:
     url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
     checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
     dest: /usr/local/bin/k3s
     owner: root
     group: root
-    mode: 0755
+    mode: "0755"
   when: ansible_facts.architecture == "x86_64"

 - name: Download k3s binary arm64
-  get_url:
+  ansible.builtin.get_url:
     url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64
     checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt
     dest: /usr/local/bin/k3s
     owner: root
     group: root
-    mode: 0755
+    mode: "0755"
   when:
-    - ( ansible_facts.architecture is search("arm") and
-      ansible_facts.userspace_bits == "64" ) or
-      ansible_facts.architecture is search("aarch64")
+    - ( ansible_facts.architecture is search("arm") and ansible_facts.userspace_bits == "64" )
+      or ansible_facts.architecture is search("aarch64")

 - name: Download k3s binary armhf
-  get_url:
+  ansible.builtin.get_url:
     url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf
     checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt
     dest: /usr/local/bin/k3s
     owner: root
     group: root
-    mode: 0755
+    mode: "0755"
   when:
     - ansible_facts.architecture is search("arm")
    - ansible_facts.userspace_bits == "32"
[deleted file — name not shown in this view]
@@ -1,12 +0,0 @@
----
-ansible_user: root
-server_init_args: >-
-  {% if groups['master'] | length > 1 %}
-    {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
-      --cluster-init
-    {% else %}
-      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
-    {% endif %}
-    --token {{ k3s_token }}
-  {% endif %}
-  {{ extra_server_args | default('') }}
[deleted file — MetalLB manifest-copy tasks]
@@ -1,27 +0,0 @@
----
-- name: Create manifests directory on first master
-  file:
-    path: /var/lib/rancher/k3s/server/manifests
-    state: directory
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy metallb namespace to first master
-  template:
-    src: "metallb.namespace.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy metallb manifest to first master
-  template:
-    src: "metallb.crds.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
[deleted file — kube-vip manifest-copy tasks]
@@ -1,27 +0,0 @@
----
-- name: Create manifests directory on first master
-  file:
-    path: /var/lib/rancher/k3s/server/manifests
-    state: directory
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy vip rbac manifest to first master
-  template:
-    src: "vip.rbac.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy vip manifest to first master
-  template:
-    src: "vip.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
[File diff suppressed because it is too large]
[deleted file — metallb.namespace.j2, inferred]
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: metallb-system
-  labels:
-    app: metallb
[deleted file — vip.rbac.yaml.j2, inferred]
@@ -1,32 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-vip
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  annotations:
-    rbac.authorization.kubernetes.io/autoupdate: "true"
-  name: system:kube-vip-role
-rules:
-  - apiGroups: [""]
-    resources: ["services", "services/status", "nodes", "endpoints"]
-    verbs: ["list","get","watch", "update"]
-  - apiGroups: ["coordination.k8s.io"]
-    resources: ["leases"]
-    verbs: ["list", "get", "watch", "update", "create"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: system:kube-vip-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:kube-vip-role
-subjects:
-  - kind: ServiceAccount
-    name: kube-vip
-    namespace: kube-system
roles/k3s/node/defaults/main.yml (new file, +3)
@@ -0,0 +1,3 @@
+---
+# Name of the master group
+group_name_master: master
[deleted file — old k3s node service tasks]
@@ -1,16 +0,0 @@
----
-
-- name: Copy K3s service file
-  template:
-    src: "k3s.service.j2"
-    dest: "{{ systemd_dir }}/k3s-node.service"
-    owner: root
-    group: root
-    mode: 0755
-
-- name: Enable and check K3s service
-  systemd:
-    name: k3s-node
-    daemon_reload: yes
-    state: restarted
-    enabled: yes
[deleted file]
@@ -1,3 +0,0 @@
----
-# Timeout to wait for MetalLB services to come up
-metal_lb_available_timeout: 120s
[deleted file]
@@ -1,8 +0,0 @@
----
-- name: Deploy metallb pool
-  include_tasks: metallb.yml
-
-- name: Remove tmp directory used for manifests
-  file:
-    path: /tmp/k3s
-    state: absent
[deleted file — old metallb.yml post tasks]
@@ -1,89 +0,0 @@
----
-- name: Create manifests directory for temp configuration
-  file:
-    path: /tmp/k3s
-    state: directory
-    owner: "{{ ansible_user }}"
-    mode: 0755
-  with_items: "{{ groups['master'] }}"
-  run_once: true
-
-- name: Copy metallb CRs manifest to first master
-  template:
-    src: "metallb.crs.j2"
-    dest: "/tmp/k3s/metallb-crs.yaml"
-    owner: "{{ ansible_user }}"
-    mode: 0755
-  with_items: "{{ groups['master'] }}"
-  run_once: true
-
-- name: Test metallb-system namespace
-  command: >-
-    k3s kubectl -n metallb-system
-  changed_when: false
-  with_items: "{{ groups['master'] }}"
-  run_once: true
-
-- name: Wait for MetalLB resources
-  command: >-
-    k3s kubectl wait {{ item.resource }}
-    --namespace='metallb-system'
-    {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
-    {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
-    {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
-    --timeout='{{ metal_lb_available_timeout }}'
-  changed_when: false
-  run_once: true
-  with_items:
-    - description: controller
-      resource: deployment
-      name: controller
-      condition: --for condition=Available=True
-    - description: webhook service
-      resource: pod
-      selector: component=controller
-      condition: --for=jsonpath='{.status.phase}'=Running
-    - description: pods in replica sets
-      resource: pod
-      selector: component=controller,app=metallb
-      condition: --for condition=Ready
-    - description: ready replicas of controller
-      resource: replicaset
-      selector: component=controller,app=metallb
-      condition: --for=jsonpath='{.status.readyReplicas}'=1
-    - description: fully labeled replicas of controller
-      resource: replicaset
-      selector: component=controller,app=metallb
-      condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
-    - description: available replicas of controller
-      resource: replicaset
-      selector: component=controller,app=metallb
-      condition: --for=jsonpath='{.status.availableReplicas}'=1
-  loop_control:
-    label: "{{ item.description }}"
-
-- name: Test metallb-system webhook-service endpoint
-  command: >-
-    k3s kubectl -n metallb-system get endpoints webhook-service
-  changed_when: false
-  with_items: "{{ groups['master'] }}"
-  run_once: true
-
-- name: Apply metallb CRs
-  command: >-
-    k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
-    --timeout='{{ metal_lb_available_timeout }}'
-  register: this
-  changed_when: false
-  run_once: true
-  until: this.rc == 0
-  retries: 5
-
-- name: Test metallb-system resources
-  command: >-
-    k3s kubectl -n metallb-system get {{ item }}
-  changed_when: false
-  run_once: true
-  with_items:
-    - IPAddressPool
-    - L2Advertisement
roles/k3s_agent/defaults/main.yml (new file, +4)
@@ -0,0 +1,4 @@
+---
+extra_agent_args: ""
+group_name_master: master
+systemd_dir: /etc/systemd/system
roles/k3s_agent/meta/main.yml (new file, +39)
@@ -0,0 +1,39 @@
+---
+argument_specs:
+  main:
+    short_description: Setup k3s agents
+    options:
+      apiserver_endpoint:
+        description: Virtual ip-address configured on each master
+        required: true
+
+      extra_agent_args:
+        description: Extra arguments for agents nodes
+
+      group_name_master:
+        description: Name of the master group
+        default: master
+
+      k3s_token:
+        description: Token used to communicate between masters
+
+      proxy_env:
+        type: dict
+        description:
+          - Internet proxy configurations.
+          - See https://docs.k3s.io/advanced#configuring-an-http-proxy for details
+        default: ~
+        options:
+          HTTP_PROXY:
+            description: HTTP internet proxy
+            required: true
+          HTTPS_PROXY:
+            description: HTTPS internet proxy
+            required: true
+          NO_PROXY:
+            description: Addresses that will not use the proxies
+            required: true
+
+      systemd_dir:
+        description: Path to systemd services
+        default: /etc/systemd/system
roles/k3s_agent/tasks/http_proxy.yml (new file, +18)
@@ -0,0 +1,18 @@
+---
+- name: Create k3s-node.service.d directory
+  ansible.builtin.file:
+    path: "{{ systemd_dir }}/k3s-node.service.d"
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+  when: proxy_env is defined
+
+- name: Copy K3s http_proxy conf file
+  ansible.builtin.template:
+    src: http_proxy.conf.j2
+    dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
+    owner: root
+    group: root
+    mode: "0755"
+  when: proxy_env is defined
roles/k3s_agent/tasks/main.yml (new file, +36)
@@ -0,0 +1,36 @@
+---
+- name: Check for PXE-booted system
+  block:
+    - name: Check if system is PXE-booted
+      ansible.builtin.command:
+        cmd: cat /proc/cmdline
+      register: boot_cmdline
+      changed_when: false
+      check_mode: false
+
+    - name: Set fact for PXE-booted system
+      ansible.builtin.set_fact:
+        is_pxe_booted: "{{ 'root=/dev/nfs' in boot_cmdline.stdout }}"
+      when: boot_cmdline.stdout is defined
+
+- name: Include http_proxy configuration tasks
+  ansible.builtin.include_tasks: http_proxy.yml
+
+- name: Deploy K3s http_proxy conf
+  ansible.builtin.include_tasks: http_proxy.yml
+  when: proxy_env is defined
+
+- name: Configure the k3s service
+  ansible.builtin.template:
+    src: k3s.service.j2
+    dest: "{{ systemd_dir }}/k3s-node.service"
+    owner: root
+    group: root
+    mode: "0755"
+
+- name: Manage k3s service
+  ansible.builtin.systemd:
+    name: k3s-node
+    daemon_reload: true
+    state: restarted
+    enabled: true
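The PXE check keys off the literal substring root=/dev/nfs in the kernel command line. A hypothetical /proc/cmdline from an NFS-root netboot client that would set is_pxe_booted to true:

  BOOT_IMAGE=vmlinuz initrd=initrd.img ip=dhcp root=/dev/nfs nfsroot=192.168.30.2:/srv/netboot/node1 rw

When the fact is true, the service template in the next hunk adds --snapshotter native, since the default overlayfs snapshotter does not work reliably on an NFS root.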
roles/k3s_agent/templates/http_proxy.conf.j2 (new file, +4)
@@ -0,0 +1,4 @@
+[Service]
+Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
+Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
+Environment=NO_PROXY={{ proxy_env.NO_PROXY }}
[roles/k3s_agent/templates/k3s.service.j2 — filename inferred]
@@ -7,11 +7,14 @@ After=network-online.target
 Type=notify
 ExecStartPre=-/sbin/modprobe br_netfilter
 ExecStartPre=-/sbin/modprobe overlay
-ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
+# Conditional snapshotter based on PXE boot status
+ExecStart=/usr/local/bin/k3s agent \
+    --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 \
+    {% if is_pxe_booted | default(false) %}--snapshotter native \
+    {% endif %}--token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} \
+    {{ extra_agent_args }}
 KillMode=process
 Delegate=yes
-# Having non-zero Limit*s causes performance problems due to accounting overhead
-# in the kernel. We recommend using cgroups to do container-local accounting.
 LimitNOFILE=1048576
 LimitNPROC=infinity
 LimitCORE=infinity
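For a PXE-booted agent the template above would render roughly as follows (endpoint and token are placeholder values, extra_agent_args is empty, and Jinja whitespace handling may shift the line breaks slightly in practice):

  ExecStart=/usr/local/bin/k3s agent \
      --server https://192.168.30.222:6443 \
      --snapshotter native \
      --token K10_EXAMPLE_TOKEN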
roles/k3s_custom_registries/meta/main.yml (new file, +20)
@@ -0,0 +1,20 @@
+---
+argument_specs:
+  main:
+    short_description: Configure the use of a custom container registry
+    options:
+      custom_registries_yaml:
+        description:
+          - YAML block defining custom registries.
+          - >
+            The following is an example that pulls all images used in
+            this playbook through your private registries.
+          - >
+            It also allows you to pull your own images from your private
+            registry, without having to use imagePullSecrets in your
+            deployments.
+          - >
+            If all you need is your own images and you don't care about
+            caching the docker/quay/ghcr.io images, you can just remove
+            those from the mirrors: section.
+        required: true
roles/k3s_custom_registries/tasks/main.yml (new file, +16)
@@ -0,0 +1,16 @@
+---
+- name: Create directory /etc/rancher/k3s
+  ansible.builtin.file:
+    path: /etc/{{ item }}
+    state: directory
+    mode: "0755"
+  loop:
+    - rancher
+    - rancher/k3s
+
+- name: Insert registries into /etc/rancher/k3s/registries.yaml
+  ansible.builtin.blockinfile:
+    path: /etc/rancher/k3s/registries.yaml
+    block: "{{ custom_registries_yaml }}"
+    mode: "0600"
+    create: true
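Because blockinfile wraps its content in marker comments, the file k3s reads ends up looking roughly like this (the mirror is the sample value from group_vars, not a real endpoint):

  # /etc/rancher/k3s/registries.yaml
  # BEGIN ANSIBLE MANAGED BLOCK
  mirrors:
    docker.io:
      endpoint:
        - "https://registry.domain.com/v2/dockerhub"
  # END ANSIBLE MANAGED BLOCK

The markers also make the task idempotent: rerunning the play replaces the managed block instead of appending a second copy.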
roles/k3s_server/defaults/main.yml (new file, +40)
@@ -0,0 +1,40 @@
+---
+extra_server_args: ""
+
+k3s_kubectl_binary: k3s kubectl
+
+group_name_master: master
+
+kube_vip_arp: true
+kube_vip_iface:
+kube_vip_cloud_provider_tag_version: main
+kube_vip_tag_version: v0.7.2
+
+kube_vip_bgp: false
+kube_vip_bgp_routerid: 127.0.0.1
+kube_vip_bgp_as: "64513"
+kube_vip_bgp_peeraddress: 192.168.30.1
+kube_vip_bgp_peeras: "64512"
+
+kube_vip_bgp_peers: []
+kube_vip_bgp_peers_groups: ['k3s_master']
+
+metal_lb_controller_tag_version: v0.14.3
+metal_lb_speaker_tag_version: v0.14.3
+metal_lb_type: native
+
+retry_count: 20
+
+# yamllint disable rule:line-length
+server_init_args: >-
+  {% if groups[group_name_master | default('master')] | length > 1 %}
+  {% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
+  --cluster-init
+  {% else %}
+  --server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
+  {% endif %}
+  --token {{ k3s_token }}
+  {% endif %}
+  {{ extra_server_args }}
+
+systemd_dir: /etc/systemd/system
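Worked through, the server_init_args expression above collapses to one of three argument sets (token and IP are placeholders):

  # first master of a multi-master cluster:
  #   --cluster-init --token K10_EXAMPLE_TOKEN
  # every other master:
  #   --server https://192.168.30.38:6443 --token K10_EXAMPLE_TOKEN
  # single-master cluster:
  #   (empty — only extra_server_args is emitted)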
135  roles/k3s_server/meta/main.yml  Normal file
@@ -0,0 +1,135 @@
---
argument_specs:
  main:
    short_description: Setup k3s servers
    options:
      apiserver_endpoint:
        description: Virtual ip-address configured on each master
        required: true

      cilium_bgp:
        description:
          - Enable cilium BGP control plane for LB services and pod cidrs.
          - Disables the use of MetalLB.
        type: bool
        default: ~

      cilium_iface:
        description: The network interface used for when Cilium is enabled
        default: ~

      extra_server_args:
        description: Extra arguments for server nodes
        default: ""

      group_name_master:
        description: Name of the master group
        default: master

      k3s_create_kubectl_symlink:
        description: Create the kubectl -> k3s symlink
        default: false
        type: bool

      k3s_create_crictl_symlink:
        description: Create the crictl -> k3s symlink
        default: false
        type: bool

      kube_vip_arp:
        description: Enables kube-vip ARP broadcasts
        default: true
        type: bool

      kube_vip_bgp:
        description: Enables kube-vip BGP peering
        default: false
        type: bool

      kube_vip_bgp_routerid:
        description: Defines the router ID for the kube-vip BGP server
        default: "127.0.0.1"

      kube_vip_bgp_as:
        description: Defines the AS for the kube-vip BGP server
        default: "64513"

      kube_vip_bgp_peeraddress:
        description: Defines the address for the kube-vip BGP peer
        default: "192.168.30.1"

      kube_vip_bgp_peeras:
        description: Defines the AS for the kube-vip BGP peer
        default: "64512"

      kube_vip_bgp_peers:
        description: List of BGP peer ASN & address pairs
        default: []

      kube_vip_bgp_peers_groups:
        description: Inventory group in which to search for additional kube_vip_bgp_peers parameters to merge.
        default: ['k3s_master']

      kube_vip_iface:
        description:
          - Explicitly define an interface that ALL control nodes
          - should use to propagate the VIP, define it here.
          - Otherwise, kube-vip will determine the right interface
          - automatically at runtime.
        default: ~

      kube_vip_tag_version:
        description: Image tag for kube-vip
        default: v0.7.2

      kube_vip_cloud_provider_tag_version:
        description: Tag for kube-vip-cloud-provider manifest when enabled
        default: main

      kube_vip_lb_ip_range:
        description: IP range for kube-vip load balancer
        default: ~

      metal_lb_controller_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_speaker_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_type:
        choices:
          - frr
          - native
        default: native
        description: Use FRR mode or native. Valid values are `frr` and `native`

      proxy_env:
        type: dict
        description:
          - Internet proxy configurations.
          - See https://docs.k3s.io/advanced#configuring-an-http-proxy for details
        default: ~
        options:
          HTTP_PROXY:
            description: HTTP internet proxy
            required: true
          HTTPS_PROXY:
            description: HTTPS internet proxy
            required: true
          NO_PROXY:
            description: Addresses that will not use the proxies
            required: true

      retry_count:
        description: Amount of retries when verifying that nodes joined
        type: int
        default: 20

      server_init_args:
        description: Arguments for server nodes

      systemd_dir:
        description: Path to systemd services
        default: /etc/systemd/system
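Judging from how vip.yaml.j2 consumes the merged peers later in this diff, each entry in kube_vip_bgp_peers carries peer_address and peer_asn keys; a group_vars sketch with placeholder values:

    # group_vars/master.yml (hypothetical)
    kube_vip_bgp: true
    kube_vip_bgp_peers:
      - peer_address: 192.168.30.1   # placeholder upstream router
        peer_asn: "64512"
    # per-host extras are merged in from any variable matching
    # kube_vip_bgp_peers__<suffix> on hosts in kube_vip_bgp_peers_groups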
@@ -23,6 +23,6 @@
   ansible.builtin.template:
     src: content.j2
     dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
-    mode: 0644
+    mode: "0644"
   vars:
     content: "{{ k3s_init_log.stdout }}"
16  roles/k3s_server/tasks/http_proxy.yml  Normal file
@@ -0,0 +1,16 @@
---
- name: Create k3s.service.d directory
  ansible.builtin.file:
    path: "{{ systemd_dir }}/k3s.service.d"
    state: directory
    owner: root
    group: root
    mode: "0755"

- name: Copy K3s http_proxy conf file
  ansible.builtin.template:
    src: http_proxy.conf.j2
    dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
    owner: root
    group: root
    mode: "0755"
27  roles/k3s_server/tasks/kube-vip.yml  Normal file
@@ -0,0 +1,27 @@
---
- name: Create manifests directory on first master
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Download vip cloud provider manifest to first master
  ansible.builtin.get_url:
    url: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml  # noqa yaml[line-length]
    dest: /var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Copy kubevip configMap manifest to first master
  ansible.builtin.template:
    src: kubevip.yaml.j2
    dest: /var/lib/rancher/k3s/server/manifests/kubevip.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
@@ -1,155 +1,169 @@
 ---
-- name: Stop k3s-init
-  systemd:
+- name: Clean previous runs of k3s-init
+  # k3s-init won't work if the port is already in use
+  ansible.builtin.systemd:
     name: k3s-init
     state: stopped
   failed_when: false

-- name: Clean previous runs of k3s-init
-  command: systemctl reset-failed k3s-init
+- name: Stop k3s
+  ansible.builtin.systemd:
+    name: k3s
+    state: stopped
+  failed_when: false
+
+- name: Clean previous runs of k3s-init  # noqa command-instead-of-module
+  # The systemd module does not support "reset-failed", so we need to resort to command.
+  ansible.builtin.command: systemctl reset-failed k3s-init
   failed_when: false
   changed_when: false
-  args:
-    warn: false  # The ansible systemd module does not support reset-failed
+
+- name: Deploy K3s http_proxy conf
+  ansible.builtin.include_tasks: http_proxy.yml
+  when: proxy_env is defined

 - name: Deploy vip manifest
-  include_tasks: vip.yml
+  ansible.builtin.include_tasks: vip.yml

 - name: Deploy metallb manifest
-  include_tasks: metallb.yml
+  ansible.builtin.include_tasks: metallb.yml
+  tags: metallb
+  when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
+
+- name: Deploy kube-vip manifest
+  ansible.builtin.include_tasks: kube-vip.yml
+  tags: kubevip
+  when: kube_vip_lb_ip_range is defined

 - name: Init cluster inside the transient k3s-init service
-  command:
-    cmd: "systemd-run -p RestartSec=2 \
-         -p Restart=on-failure \
-         --unit=k3s-init \
-         k3s server {{ server_init_args }}"
-    creates: "{{ systemd_dir }}/k3s.service"
+  ansible.builtin.command:
+    cmd: systemd-run -p RestartSec=2 -p Restart=on-failure --unit=k3s-init k3s server {{ server_init_args }}
+    creates: "{{ systemd_dir }}/k3s-init.service"

 - name: Verification
+  when: not ansible_check_mode
   block:
     - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
-      command:
-        cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
+      ansible.builtin.command:
+        cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'"  # yamllint disable-line rule:line-length
       register: nodes
-      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
+      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length)  # yamllint disable-line rule:line-length
       retries: "{{ retry_count | default(20) }}"
       delay: 10
       changed_when: false
   always:
     - name: Save logs of k3s-init.service
-      include_tasks: fetch_k3s_init_logs.yml
+      ansible.builtin.include_tasks: fetch_k3s_init_logs.yml
       when: log_destination
       vars:
         log_destination: >-
           {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
     - name: Kill the temporary service used for initialization
-      systemd:
+      ansible.builtin.systemd:
         name: k3s-init
         state: stopped
       failed_when: false
-      when: not ansible_check_mode

 - name: Copy K3s service file
   register: k3s_service
-  template:
-    src: "k3s.service.j2"
+  ansible.builtin.template:
+    src: k3s.service.j2
     dest: "{{ systemd_dir }}/k3s.service"
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"

 - name: Enable and check K3s service
-  systemd:
+  ansible.builtin.systemd:
     name: k3s
-    daemon_reload: yes
+    daemon_reload: true
     state: restarted
-    enabled: yes
+    enabled: true

 - name: Wait for node-token
-  wait_for:
+  ansible.builtin.wait_for:
     path: /var/lib/rancher/k3s/server/node-token

 - name: Register node-token file access mode
-  stat:
+  ansible.builtin.stat:
     path: /var/lib/rancher/k3s/server
   register: p

 - name: Change file access node-token
-  file:
+  ansible.builtin.file:
     path: /var/lib/rancher/k3s/server
-    mode: "g+rx,o+rx"
+    mode: g+rx,o+rx

 - name: Read node-token from master
-  slurp:
+  ansible.builtin.slurp:
     src: /var/lib/rancher/k3s/server/node-token
   register: node_token

 - name: Store Master node-token
-  set_fact:
+  ansible.builtin.set_fact:
     token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"

 - name: Restore node-token file access
-  file:
+  ansible.builtin.file:
     path: /var/lib/rancher/k3s/server
     mode: "{{ p.stat.mode }}"

 - name: Create directory .kube
-  file:
-    path: ~{{ ansible_user }}/.kube
+  ansible.builtin.file:
+    path: "{{ ansible_user_dir }}/.kube"
     state: directory
-    owner: "{{ ansible_user }}"
-    mode: "u=rwx,g=rx,o="
+    owner: "{{ ansible_user_id }}"
+    mode: u=rwx,g=rx,o=

 - name: Copy config file to user home directory
-  copy:
+  ansible.builtin.copy:
     src: /etc/rancher/k3s/k3s.yaml
-    dest: ~{{ ansible_user }}/.kube/config
-    remote_src: yes
-    owner: "{{ ansible_user }}"
-    mode: "u=rw,g=,o="
+    dest: "{{ ansible_user_dir }}/.kube/config"
+    remote_src: true
+    owner: "{{ ansible_user_id }}"
+    mode: u=rw,g=,o=

 - name: Configure kubectl cluster to {{ endpoint_url }}
-  command: >-
-    k3s kubectl config set-cluster default
+  ansible.builtin.command: >-
+    {{ k3s_kubectl_binary | default('k3s kubectl') }} config set-cluster default
       --server={{ endpoint_url }}
-      --kubeconfig ~{{ ansible_user }}/.kube/config
+      --kubeconfig {{ ansible_user_dir }}/.kube/config
   changed_when: true
   vars:
     endpoint_url: >-
       https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
   # Deactivated linter rules:
   #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
   #     would be undefined. This will not be the case during playbook execution.
   # noqa jinja[invalid]

 - name: Create kubectl symlink
-  file:
+  ansible.builtin.file:
     src: /usr/local/bin/k3s
     dest: /usr/local/bin/kubectl
     state: link
+  when: k3s_create_kubectl_symlink | default(true) | bool

 - name: Create crictl symlink
-  file:
+  ansible.builtin.file:
     src: /usr/local/bin/k3s
     dest: /usr/local/bin/crictl
     state: link
+  when: k3s_create_crictl_symlink | default(true) | bool

 - name: Get contents of manifests folder
-  find:
+  ansible.builtin.find:
     paths: /var/lib/rancher/k3s/server/manifests
     file_type: file
   register: k3s_server_manifests

 - name: Get sub dirs of manifests folder
-  find:
+  ansible.builtin.find:
     paths: /var/lib/rancher/k3s/server/manifests
     file_type: directory
   register: k3s_server_manifests_directories

 - name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
-  file:
+  ansible.builtin.file:
     path: "{{ item.path }}"
     state: absent
   with_items:
30  roles/k3s_server/tasks/metallb.yml  Normal file
@@ -0,0 +1,30 @@
---
- name: Create manifests directory on first master
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
  ansible.builtin.get_url:
    url: https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml  # noqa yaml[line-length]
    dest: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
  ansible.builtin.replace:
    path: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
    replace: "{{ item.to }}"
  with_items:
    - change: metallb/speaker:{{ metal_lb_controller_tag_version }}
      to: metallb/speaker:{{ metal_lb_speaker_tag_version }}
  loop_control:
    label: "{{ item.change }} => {{ item.to }}"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
31  roles/k3s_server/tasks/vip.yml  Normal file
@@ -0,0 +1,31 @@
---
- name: Set _kube_vip_bgp_peers fact
  ansible.builtin.set_fact:
    _kube_vip_bgp_peers: "{{ lookup('community.general.merge_variables', '^kube_vip_bgp_peers__.+$', initial_value=kube_vip_bgp_peers, groups=kube_vip_bgp_peers_groups) }}"  # yamllint disable-line rule:line-length

- name: Create manifests directory on first master
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Download vip rbac manifest to first master
  ansible.builtin.get_url:
    url: https://kube-vip.io/manifests/rbac.yaml
    dest: /var/lib/rancher/k3s/server/manifests/vip-rbac.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Copy vip manifest to first master
  ansible.builtin.template:
    src: vip.yaml.j2
    dest: /var/lib/rancher/k3s/server/manifests/vip.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
4  roles/k3s_server/templates/http_proxy.conf.j2  Normal file
@@ -0,0 +1,4 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}
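A proxy_env dict matching the argument spec above could be set in group_vars like this (proxy hosts and networks are placeholders); the template writes each key into a systemd drop-in for k3s.service:

    proxy_env:
      HTTP_PROXY: http://proxy.example.com:3128   # placeholder proxy
      HTTPS_PROXY: http://proxy.example.com:3128
      NO_PROXY: localhost,127.0.0.1,10.52.0.0/16,192.168.30.0/24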
13  roles/k3s_server/templates/kubevip.yaml.j2  Normal file
@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
  range-global: {{ kube_vip_lb_ip_range | join(',') }}
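The string/list normalization above means both inventory forms below render a valid range-global value (addresses illustrative):

    # legacy form: a single string
    kube_vip_lb_ip_range: 192.168.30.80-192.168.30.90
    # renders: range-global: 192.168.30.80-192.168.30.90

    # list form: multiple ranges, joined with ','
    kube_vip_lb_ip_range:
      - 192.168.30.80-192.168.30.85
      - 192.168.30.86-192.168.30.90
    # renders: range-global: 192.168.30.80-192.168.30.85,192.168.30.86-192.168.30.90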
@@ -27,11 +27,15 @@ spec:
         - manager
         env:
         - name: vip_arp
-          value: "true"
+          value: "{{ 'true' if kube_vip_arp | default(true) | bool else 'false' }}"
+        - name: bgp_enable
+          value: "{{ 'true' if kube_vip_bgp | default(false) | bool else 'false' }}"
         - name: port
           value: "6443"
+{% if kube_vip_iface %}
         - name: vip_interface
-          value: {{ flannel_iface }}
+          value: {{ kube_vip_iface }}
+{% endif %}
         - name: vip_cidr
           value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
         - name: cp_enable
@@ -41,7 +45,7 @@ spec:
         - name: vip_ddns
           value: "false"
         - name: svc_enable
-          value: "false"
+          value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}"
         - name: vip_leaderelection
           value: "true"
         - name: vip_leaseduration
@@ -52,6 +56,29 @@ spec:
           value: "2"
         - name: address
           value: {{ apiserver_endpoint }}
+{% if kube_vip_bgp | default(false) | bool %}
+{% if kube_vip_bgp_routerid is defined %}
+        - name: bgp_routerid
+          value: "{{ kube_vip_bgp_routerid }}"
+{% endif %}
+{% if _kube_vip_bgp_peers | length > 0 %}
+        - name: bgppeers
+          value: "{{ _kube_vip_bgp_peers | map(attribute='peer_address') | zip(_kube_vip_bgp_peers | map(attribute='peer_asn')) | map('join', ',') | join(':') }}"  # yamllint disable-line rule:line-length
+{% else %}
+{% if kube_vip_bgp_as is defined %}
+        - name: bgp_as
+          value: "{{ kube_vip_bgp_as }}"
+{% endif %}
+{% if kube_vip_bgp_peeraddress is defined %}
+        - name: bgp_peeraddress
+          value: "{{ kube_vip_bgp_peeraddress }}"
+{% endif %}
+{% if kube_vip_bgp_peeras is defined %}
+        - name: bgp_peeras
+          value: "{{ kube_vip_bgp_peeras }}"
+{% endif %}
+{% endif %}
+{% endif %}
         image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }}
         imagePullPolicy: Always
         name: kube-vip
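As a worked example of the zip/join expression above: with two merged peers (placeholder addresses) such as [{peer_address: 192.168.30.1, peer_asn: "64512"}, {peer_address: 192.168.30.2, peer_asn: "64512"}], the template emits

        - name: bgppeers
          value: "192.168.30.1,64512:192.168.30.2,64512"

i.e. each peer becomes "address,asn" and peers are joined with ':'.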
32  roles/k3s_server_post/defaults/main.yml  Normal file
@@ -0,0 +1,32 @@
---
k3s_kubectl_binary: k3s kubectl

bpf_lb_algorithm: maglev
bpf_lb_mode: hybrid

calico_blockSize: 26  # noqa var-naming
calico_ebpf: false
calico_encapsulation: VXLANCrossSubnet
calico_natOutgoing: Enabled  # noqa var-naming
calico_nodeSelector: all()  # noqa var-naming
calico_tag: v3.27.2

cilium_bgp: false
cilium_exportPodCIDR: true  # noqa var-naming
cilium_bgp_my_asn: 64513
cilium_bgp_peer_asn: 64512
cilium_bgp_neighbors: []
cilium_bgp_neighbors_groups: ['k3s_all']
cilium_bgp_lb_cidr: 192.168.31.0/24
cilium_hubble: true
cilium_mode: native

cluster_cidr: 10.52.0.0/16
enable_bpf_masquerade: true
kube_proxy_replacement: true
group_name_master: master

metal_lb_mode: layer2
metal_lb_available_timeout: 240s
metal_lb_controller_tag_version: v0.14.3
metal_lb_ip_range: 192.168.30.80-192.168.30.90
153  roles/k3s_server_post/meta/main.yml  Normal file
@@ -0,0 +1,153 @@
---
argument_specs:
  main:
    short_description: Configure k3s cluster
    options:
      apiserver_endpoint:
        description: Virtual ip-address configured on each master
        required: true

      bpf_lb_algorithm:
        description: BPF lb algorithm
        default: maglev

      bpf_lb_mode:
        description: BPF lb mode
        default: hybrid

      calico_blockSize:
        description: IP pool block size
        type: int
        default: 26

      calico_ebpf:
        description: Use eBPF dataplane instead of iptables
        type: bool
        default: false

      calico_encapsulation:
        description: IP pool encapsulation
        default: VXLANCrossSubnet

      calico_natOutgoing:
        description: IP pool NAT outgoing
        default: Enabled

      calico_nodeSelector:
        description: IP pool node selector
        default: all()

      calico_iface:
        description: The network interface used for when Calico is enabled
        default: ~

      calico_tag:
        description: Calico version tag
        default: v3.27.2

      cilium_bgp:
        description:
          - Enable cilium BGP control plane for LB services and pod cidrs.
          - Disables the use of MetalLB.
        type: bool
        default: false

      cilium_bgp_my_asn:
        description: Local ASN for BGP peer
        type: int
        default: 64513

      cilium_bgp_peer_asn:
        description: BGP peer ASN
        type: int
        default: 64512

      cilium_bgp_peer_address:
        description: BGP peer address
        default: ~

      cilium_bgp_neighbors:
        description: List of BGP peer ASN & address pairs
        default: []

      cilium_bgp_neighbors_groups:
        description: Inventory group in which to search for additional cilium_bgp_neighbors parameters to merge.
        default: ['k3s_all']

      cilium_bgp_lb_cidr:
        description: BGP load balancer IP range
        default: 192.168.31.0/24

      cilium_exportPodCIDR:
        description: Export pod CIDR
        type: bool
        default: true

      cilium_hubble:
        description: Enable Cilium Hubble
        type: bool
        default: true

      cilium_iface:
        description: The network interface used for when Cilium is enabled
        default: ~

      cilium_mode:
        description: Inner-node communication mode
        default: native
        choices:
          - native
          - routed

      cluster_cidr:
        description: Inner-cluster IP range
        default: 10.52.0.0/16

      enable_bpf_masquerade:
        description: Use IP masquerading
        type: bool
        default: true

      group_name_master:
        description: Name of the master group
        default: master

      kube_proxy_replacement:
        description: Replace the native kube-proxy with Cilium
        type: bool
        default: true

      kube_vip_lb_ip_range:
        description: IP range for kube-vip load balancer
        default: ~

      metal_lb_available_timeout:
        description: Wait for MetalLB resources
        default: 240s

      metal_lb_ip_range:
        description: MetalLB ip range for load balancer
        default: 192.168.30.80-192.168.30.90

      metal_lb_controller_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_mode:
        description: Metallb mode
        default: layer2
        choices:
          - bgp
          - layer2

      metal_lb_bgp_my_asn:
        description: BGP ASN configurations
        default: ~

      metal_lb_bgp_peer_asn:
        description: BGP peer ASN configurations
        default: ~

      metal_lb_bgp_peer_address:
        description: BGP peer address
        default: ~
120  roles/k3s_server_post/tasks/calico.yml  Normal file
@@ -0,0 +1,120 @@
---
- name: Deploy Calico to cluster
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
  run_once: true
  block:
    - name: Create manifests directory on first master
      ansible.builtin.file:
        path: /tmp/k3s
        state: directory
        owner: root
        group: root
        mode: "0755"

    - name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
      ansible.builtin.get_url:
        url: https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml
        dest: /tmp/k3s/tigera-operator.yaml
        owner: root
        group: root
        mode: "0755"

    - name: Copy Calico custom resources manifest to first master
      ansible.builtin.template:
        src: calico.crs.j2
        dest: /tmp/k3s/custom-resources.yaml
        owner: root
        group: root
        mode: "0755"

    - name: Deploy or replace Tigera Operator
      block:
        - name: Deploy Tigera Operator
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/tigera-operator.yaml"
          register: create_operator
          changed_when: "'created' in create_operator.stdout"
          failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
      rescue:
        - name: Replace existing Tigera Operator
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} replace -f /tmp/k3s/tigera-operator.yaml"
          register: replace_operator
          changed_when: "'replaced' in replace_operator.stdout"
          failed_when: "'Error' in replace_operator.stderr"

    - name: Wait for Tigera Operator resources
      ansible.builtin.command: >-
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace='tigera-operator'
        --for=condition=Available=True
        --timeout=30s
      register: tigera_result
      changed_when: false
      until: tigera_result is succeeded
      retries: 7
      delay: 7
      with_items:
        - { name: tigera-operator, type: deployment }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

    - name: Deploy Calico custom resources
      block:
        - name: Deploy custom resources for Calico
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/custom-resources.yaml"
          register: create_cr
          changed_when: "'created' in create_cr.stdout"
          failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
      rescue:
        - name: Apply new Calico custom resource manifest
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/custom-resources.yaml"
          register: apply_cr
          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
          failed_when: "'Error' in apply_cr.stderr"

    - name: Wait for Calico system resources to be available
      ansible.builtin.command: >-
        {% if item.type == 'daemonset' %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods
        --namespace='{{ item.namespace }}'
        --selector={{ item.selector }}
        --for=condition=Ready
        {% else %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace='{{ item.namespace }}'
        --for=condition=Available
        {% endif %}
        --timeout=30s
      register: cr_result
      changed_when: false
      until: cr_result is succeeded
      retries: 30
      delay: 7
      with_items:
        - { name: calico-typha, type: deployment, namespace: calico-system }
        - { name: calico-kube-controllers, type: deployment, namespace: calico-system }
        - name: csi-node-driver
          type: daemonset
          selector: k8s-app=csi-node-driver
          namespace: calico-system
        - name: calico-node
          type: daemonset
          selector: k8s-app=calico-node
          namespace: calico-system
        - { name: calico-apiserver, type: deployment, namespace: calico-apiserver }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

    - name: Patch Felix configuration for eBPF mode
      ansible.builtin.command:
        cmd: >
          {{ k3s_kubectl_binary | default('k3s kubectl') }} patch felixconfiguration default
          --type='merge'
          --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
      register: patch_result
      changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
      failed_when: "'Error' in patch_result.stderr"
      when: calico_ebpf
256  roles/k3s_server_post/tasks/cilium.yml  Normal file
@@ -0,0 +1,256 @@
---
- name: Prepare Cilium CLI on first master and deploy CNI
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
  run_once: true
  block:
    - name: Create tmp directory on first master
      ansible.builtin.file:
        path: /tmp/k3s
        state: directory
        owner: root
        group: root
        mode: "0755"

    - name: Check if Cilium CLI is installed
      ansible.builtin.command: cilium version
      register: cilium_cli_installed
      failed_when: false
      changed_when: false
      ignore_errors: true

    - name: Check for Cilium CLI version in command output
      ansible.builtin.set_fact:
        installed_cli_version: >-
          {{
            cilium_cli_installed.stdout_lines
            | join(' ')
            | regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
            | first
            | default('unknown')
          }}
      when: cilium_cli_installed.rc == 0

    - name: Get latest stable Cilium CLI version file
      ansible.builtin.get_url:
        url: https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt
        dest: /tmp/k3s/cilium-cli-stable.txt
        owner: root
        group: root
        mode: "0755"

    - name: Read Cilium CLI stable version from file
      ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
      register: cli_ver
      changed_when: false

    - name: Log installed Cilium CLI version
      ansible.builtin.debug:
        msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"

    - name: Log latest stable Cilium CLI version
      ansible.builtin.debug:
        msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"

    - name: Determine if Cilium CLI needs installation or update
      ansible.builtin.set_fact:
        cilium_cli_needs_update: >-
          {{
            cilium_cli_installed.rc != 0 or
            (cilium_cli_installed.rc == 0 and
            installed_cli_version != cli_ver.stdout)
          }}

    - name: Install or update Cilium CLI
      when: cilium_cli_needs_update
      block:
        - name: Set architecture variable
          ansible.builtin.set_fact:
            cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"

        - name: Download Cilium CLI and checksum
          ansible.builtin.get_url:
            url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
            dest: /tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}
            owner: root
            group: root
            mode: "0755"
          loop:
            - .tar.gz
            - .tar.gz.sha256sum
          vars:
            cilium_base_url: https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}

        - name: Verify the downloaded tarball
          ansible.builtin.shell: |
            cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
          args:
            executable: /bin/bash
          changed_when: false

        - name: Extract Cilium CLI to /usr/local/bin
          ansible.builtin.unarchive:
            src: /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
            dest: /usr/local/bin
            remote_src: true

        - name: Remove downloaded tarball and checksum file
          ansible.builtin.file:
            path: "{{ item }}"
            state: absent
          loop:
            - /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
            - /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum

    - name: Wait for connectivity to kube VIP
      ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
      register: ping_result
      until: ping_result.rc == 0
      retries: 21
      delay: 1
      ignore_errors: true
      changed_when: false

    - name: Fail if kube VIP not reachable
      ansible.builtin.fail:
        msg: API endpoint {{ apiserver_endpoint }} is not reachable
      when: ping_result.rc != 0

    - name: Test for existing Cilium install
      ansible.builtin.command: |
        {{ k3s_kubectl_binary | default('k3s kubectl') }} -n kube-system get daemonsets cilium
      register: cilium_installed
      failed_when: false
      changed_when: false
      ignore_errors: true

    - name: Check existing Cilium install
      when: cilium_installed.rc == 0
      block:
        - name: Check Cilium version
          ansible.builtin.command: cilium version
          register: cilium_version
          failed_when: false
          changed_when: false
          ignore_errors: true

        - name: Parse installed Cilium version
          ansible.builtin.set_fact:
            installed_cilium_version: >-
              {{
                cilium_version.stdout_lines
                | join(' ')
                | regex_findall('cilium image.+(\d+\.\d+\.\d+)')
                | first
                | default('unknown')
              }}

        - name: Determine if Cilium needs update
          ansible.builtin.set_fact:
            cilium_needs_update: >-
              {{ 'v' + installed_cilium_version != cilium_tag }}

        - name: Log result
          ansible.builtin.debug:
            msg: >
              Installed Cilium version: {{ installed_cilium_version }},
              Target Cilium version: {{ cilium_tag }},
              Update needed: {{ cilium_needs_update }}

    - name: Install Cilium
      ansible.builtin.command: >-
        {% if cilium_installed.rc != 0 %}
        cilium install
        {% else %}
        cilium upgrade
        {% endif %}
        --version "{{ cilium_tag }}"
        --helm-set operator.replicas="1"
        {{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
        --helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
        {% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
        --helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
        {% endif %}
        --helm-set k8sServiceHost="127.0.0.1"
        --helm-set k8sServicePort="6444"
        --helm-set routingMode={{ cilium_mode }}
        --helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
        --helm-set kubeProxyReplacement={{ kube_proxy_replacement }}
        --helm-set bpf.masquerade={{ enable_bpf_masquerade }}
        --helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
        --helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
        --helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
        --helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
        {% if kube_proxy_replacement is not false %}
        --helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm }}
        --helm-set bpf.loadBalancer.mode={{ bpf_lb_mode }}
        {% endif %}
      environment:
        KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
      register: cilium_install_result
      changed_when: cilium_install_result.rc == 0
      when: cilium_installed.rc != 0 or cilium_needs_update

    - name: Wait for Cilium resources
      ansible.builtin.command: >-
        {% if item.type == 'daemonset' %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods
        --namespace=kube-system
        --selector='k8s-app=cilium'
        --for=condition=Ready
        {% else %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace=kube-system
        --for=condition=Available
        {% endif %}
        --timeout=30s
      register: cr_result
      changed_when: false
      until: cr_result is succeeded
      retries: 30
      delay: 7
      with_items:
        - { name: cilium-operator, type: deployment }
        - { name: cilium, type: daemonset, selector: k8s-app=cilium }
        - { name: hubble-relay, type: deployment, check_hubble: true }
        - { name: hubble-ui, type: deployment, check_hubble: true }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"
      when: >-
        not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)

    - name: Configure Cilium BGP
      when: cilium_bgp
      block:
        - name: Set _cilium_bgp_neighbors fact
          ansible.builtin.set_fact:
            _cilium_bgp_neighbors: "{{ lookup('community.general.merge_variables', '^cilium_bgp_neighbors__.+$', initial_value=cilium_bgp_neighbors, groups=cilium_bgp_neighbors_groups) }}"  # yamllint disable-line rule:line-length

        - name: Copy BGP manifests to first master
          ansible.builtin.template:
            src: cilium.crs.j2
            dest: /tmp/k3s/cilium-bgp.yaml
            owner: root
            group: root
            mode: "0755"

        - name: Apply BGP manifests
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/cilium-bgp.yaml"
          register: apply_cr
          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
          failed_when: "'is invalid' in apply_cr.stderr"
          ignore_errors: true

        - name: Print error message if BGP manifests application fails
          ansible.builtin.debug:
            msg: "{{ apply_cr.stderr }}"
          when: "'is invalid' in apply_cr.stderr"

        - name: Test for BGP config resources
          ansible.builtin.command: "{{ item }}"
          loop:
            - "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumBGPPeeringPolicy.cilium.io"
            - "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumLoadBalancerIPPool.cilium.io"
          changed_when: false
          loop_control:
            label: "{{ item }}"
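The merge_variables lookup above collects any variable whose name matches ^cilium_bgp_neighbors__.+$ from hosts in cilium_bgp_neighbors_groups and appends the entries to cilium_bgp_neighbors; per the cilium.crs.j2 template later in this diff, each entry carries peer_address and peer_asn. A hypothetical host_vars sketch:

    # host_vars/router-adjacent-node.yml (hypothetical file and values)
    cilium_bgp_neighbors__site_a:
      - peer_address: 192.168.30.1   # placeholder upstream router
        peer_asn: 64512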
20  roles/k3s_server_post/tasks/main.yml  Normal file
@@ -0,0 +1,20 @@
---
- name: Deploy calico
  ansible.builtin.include_tasks: calico.yml
  tags: calico
  when: calico_iface is defined and cilium_iface is not defined

- name: Deploy cilium
  ansible.builtin.include_tasks: cilium.yml
  tags: cilium
  when: cilium_iface is defined

- name: Deploy metallb pool
  ansible.builtin.include_tasks: metallb.yml
  tags: metallb
  when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)

- name: Remove tmp directory used for manifests
  ansible.builtin.file:
    path: /tmp/k3s
    state: absent
136  roles/k3s_server_post/tasks/metallb.yml  Normal file
@@ -0,0 +1,136 @@
---
- name: Create manifests directory for temp configuration
  ansible.builtin.file:
    path: /tmp/k3s
    state: directory
    owner: "{{ ansible_user_id }}"
    mode: "0755"
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Delete outdated metallb replicas
  ansible.builtin.shell: |-
    set -o pipefail

    REPLICAS=$({{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' get replicasets \
      -l 'component=controller,app=metallb' \
      -o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
    REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
    if [ -n "${REPLICAS_SETS}" ] ; then
      for REPLICAS in "${REPLICAS_SETS}"
      do
        {{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' \
          delete rs "${REPLICAS}"
      done
    fi
  args:
    executable: /bin/bash
  changed_when: false
  run_once: true
  with_items: "{{ groups[group_name_master | default('master')] }}"

- name: Copy metallb CRs manifest to first master
  ansible.builtin.template:
    src: metallb.crs.j2
    dest: /tmp/k3s/metallb-crs.yaml
    owner: "{{ ansible_user_id }}"
    mode: "0755"
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Test metallb-system namespace
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system
  changed_when: false
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Wait for MetalLB resources
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.resource }}
    --namespace='metallb-system'
    {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
    {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
    {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
    --timeout='{{ metal_lb_available_timeout }}'
  changed_when: false
  run_once: true
  with_items:
    - description: controller
      resource: deployment
      name: controller
      condition: --for condition=Available=True
    - description: webhook service
      resource: pod
      selector: component=controller
      condition: --for=jsonpath='{.status.phase}'=Running
    - description: pods in replica sets
      resource: pod
      selector: component=controller,app=metallb
      condition: --for condition=Ready
    - description: ready replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.readyReplicas}'=1
    - description: fully labeled replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
    - description: available replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.availableReplicas}'=1
  loop_control:
    label: "{{ item.description }}"

- name: Set metallb webhook service name
  ansible.builtin.set_fact:
    metallb_webhook_service_name: >-
      {{
        (
          (metal_lb_controller_tag_version | regex_replace('^v', ''))
          is
          version('0.14.4', '<', version_type='semver')
        ) | ternary(
          'webhook-service',
          'metallb-webhook-service'
        )
      }}

- name: Test metallb-system webhook-service endpoint
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get endpoints {{ metallb_webhook_service_name }}
  changed_when: false
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Apply metallb CRs
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/metallb-crs.yaml
    --timeout='{{ metal_lb_available_timeout }}'
  register: this
  changed_when: false
  run_once: true
  until: this.rc == 0
  retries: 5

- name: Test metallb-system resources for Layer 2 configuration
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "layer2"
  with_items:
    - IPAddressPool
    - L2Advertisement

- name: Test metallb-system resources for BGP configuration
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "bgp"
  with_items:
    - IPAddressPool
    - BGPPeer
    - BGPAdvertisement
41  roles/k3s_server_post/templates/calico.crs.j2  Normal file
@@ -0,0 +1,41 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: {{ calico_blockSize }}
        cidr: {{ cluster_cidr }}
        encapsulation: {{ calico_encapsulation }}
        natOutgoing: {{ calico_natOutgoing }}
        nodeSelector: {{ calico_nodeSelector }}
    nodeAddressAutodetectionV4:
      interface: {{ calico_iface }}
    linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}

---

# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
  KUBERNETES_SERVICE_PORT: '6443'
{% endif %}
48  roles/k3s_server_post/templates/cilium.crs.j2  Normal file
@@ -0,0 +1,48 @@
apiVersion: "cilium.io/v2alpha1"
kind: CiliumBGPPeeringPolicy
metadata:
  name: 01-bgp-peering-policy
spec: # CiliumBGPPeeringPolicySpec
  virtualRouters: # []CiliumBGPVirtualRouter
    - localASN: {{ cilium_bgp_my_asn }}
      exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
      neighbors: # []CiliumBGPNeighbor
{% if _cilium_bgp_neighbors | length > 0 %}
{% for item in _cilium_bgp_neighbors %}
        - peerAddress: '{{ item.peer_address + "/32"}}'
          peerASN: {{ item.peer_asn }}
          eBGPMultihopTTL: 10
          connectRetryTimeSeconds: 120
          holdTimeSeconds: 90
          keepAliveTimeSeconds: 30
          gracefulRestart:
            enabled: true
            restartTimeSeconds: 120
{% endfor %}
{% else %}
        - peerAddress: '{{ cilium_bgp_peer_address + "/32"}}'
          peerASN: {{ cilium_bgp_peer_asn }}
          eBGPMultihopTTL: 10
          connectRetryTimeSeconds: 120
          holdTimeSeconds: 90
          keepAliveTimeSeconds: 30
          gracefulRestart:
            enabled: true
            restartTimeSeconds: 120
{% endif %}
      serviceSelector:
        matchExpressions:
          - {key: somekey, operator: NotIn, values: ['never-used-value']}
---
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
  name: "01-lb-pool"
spec:
  blocks:
{% if "/" in cilium_bgp_lb_cidr %}
    - cidr: {{ cilium_bgp_lb_cidr }}
{% else %}
    - start: {{ cilium_bgp_lb_cidr.split('-')[0] }}
      stop: {{ cilium_bgp_lb_cidr.split('-')[1] }}
{% endif %}
@@ -13,9 +13,31 @@ spec:
|
|||||||
 {% for range in metal_lb_ip_range %}
   - {{ range }}
 {% endfor %}
+
+{% if metal_lb_mode == "layer2" %}
 ---
 apiVersion: metallb.io/v1beta1
 kind: L2Advertisement
 metadata:
   name: default
   namespace: metallb-system
+{% endif %}
+{% if metal_lb_mode == "bgp" %}
+---
+apiVersion: metallb.io/v1beta2
+kind: BGPPeer
+metadata:
+  name: default
+  namespace: metallb-system
+spec:
+  myASN: {{ metal_lb_bgp_my_asn }}
+  peerASN: {{ metal_lb_bgp_peer_asn }}
+  peerAddress: {{ metal_lb_bgp_peer_address }}
+
+---
+apiVersion: metallb.io/v1beta1
+kind: BGPAdvertisement
+metadata:
+  name: default
+  namespace: metallb-system
+{% endif %}
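The MetalLB resources above are driven by variables along these lines (an illustrative sketch, not values taken from this diff); note that metal_lb_ip_range is iterated in the template, so it is a list:

# hypothetical group_vars excerpt (values are assumptions)
metal_lb_mode: layer2                     # or "bgp"
metal_lb_ip_range:
  - 192.168.30.80-192.168.30.90
# consumed only when metal_lb_mode == "bgp":
metal_lb_bgp_my_asn: 64513
metal_lb_bgp_peer_asn: 64512
metal_lb_bgp_peer_address: "192.168.30.1"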
6  roles/lxc/handlers/main.yml  Normal file
@@ -0,0 +1,6 @@
---
- name: Reboot server
  become: true
  ansible.builtin.reboot:
    reboot_command: "{{ custom_reboot_command | default(omit) }}"
  listen: reboot server
8  roles/lxc/meta/main.yml  Normal file
@@ -0,0 +1,8 @@
---
argument_specs:
  main:
    short_description: Configure LXC
    options:
      custom_reboot_command:
        default: ~
        description: Command to run on reboot
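Since ansible.builtin.reboot accepts a reboot_command override, a guest that cannot use the module's default (for example an LXC container) could set the option like this; the value is an assumption for illustration:

# hypothetical host_vars excerpt (assumed value)
custom_reboot_command: /usr/sbin/shutdown -r now
# left unset (~), default(omit) lets the reboot module pick its own command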
21  roles/lxc/tasks/main.yml  Normal file
@@ -0,0 +1,21 @@
---
- name: Check for rc.local file
  ansible.builtin.stat:
    path: /etc/rc.local
  register: rcfile

- name: Create rc.local if needed
  ansible.builtin.lineinfile:
    path: /etc/rc.local
    line: "#!/bin/sh -e"
    create: true
    insertbefore: BOF
    mode: u=rwx,g=rx,o=rx
  when: not rcfile.stat.exists

- name: Write rc.local file
  ansible.builtin.blockinfile:
    path: /etc/rc.local
    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
    state: present
  notify: reboot server
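The templates/rc.local.j2 looked up above is not shown in this diff; for k3s inside LXC it would plausibly carry the usual workaround from the gist referenced in roles/proxmox_lxc below (a sketch under that assumption, not the repo's actual template):

#!/bin/sh -e
# LXC guests have no /dev/kmsg; point it at /dev/console so k3s can log
if [ ! -e /dev/kmsg ]; then
    ln -s /dev/console /dev/kmsg
fi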
4  roles/prereq/defaults/main.yml  Normal file
@@ -0,0 +1,4 @@
---
secure_path:
  RedHat: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
  Suse: /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin
7  roles/prereq/meta/main.yml  Normal file
@@ -0,0 +1,7 @@
---
argument_specs:
  main:
    short_description: Prerequisites
    options:
      system_timezone:
        description: Timezone to be set on all nodes
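This option would be satisfied by a group_vars entry such as the following (an illustrative value; the tasks below skip the step while it is left at the "Your/Timezone" placeholder):

system_timezone: "America/Chicago"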
@@ -1,65 +1,69 @@
 ---
 - name: Set same timezone on every Server
-  timezone:
+  community.general.timezone:
     name: "{{ system_timezone }}"
   when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
 
 - name: Set SELinux to disabled state
-  selinux:
+  ansible.posix.selinux:
     state: disabled
   when: ansible_os_family == "RedHat"
 
 - name: Enable IPv4 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_forward
     value: "1"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl
 
 - name: Enable IPv6 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv6.conf.all.forwarding
     value: "1"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl
 
 - name: Enable IPv6 router advertisements
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv6.conf.all.accept_ra
     value: "2"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl
 
 - name: Add br_netfilter to /etc/modules-load.d/
-  copy:
+  ansible.builtin.copy:
-    content: "br_netfilter"
+    content: br_netfilter
     dest: /etc/modules-load.d/br_netfilter.conf
-    mode: "u=rw,g=,o="
+    mode: u=rw,g=,o=
   when: ansible_os_family == "RedHat"
 
 - name: Load br_netfilter
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   when: ansible_os_family == "RedHat"
 
 - name: Set bridge-nf-call-iptables (just to be sure)
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     value: "1"
     state: present
-    reload: yes
+    reload: true
   when: ansible_os_family == "RedHat"
   loop:
     - net.bridge.bridge-nf-call-iptables
     - net.bridge.bridge-nf-call-ip6tables
+  tags: sysctl
 
 - name: Add /usr/local/bin to sudo secure_path
-  lineinfile:
+  ansible.builtin.lineinfile:
-    line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
+    line: Defaults secure_path = {{ secure_path[ansible_os_family] }}
-    regexp: "Defaults(\s)*secure_path(\s)*="
+    regexp: Defaults(\s)*secure_path(\s)*=
     state: present
     insertafter: EOF
     path: /etc/sudoers
-    validate: 'visudo -cf %s'
+    validate: visudo -cf %s
-  when: ansible_os_family == "RedHat"
+  when: ansible_os_family in [ "RedHat", "Suse" ]
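With the secure_path defaults introduced in roles/prereq/defaults/main.yml above, the templated sudoers line now renders per OS family; on a Suse host, for example, it becomes:

Defaults secure_path = /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin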
13  roles/proxmox_lxc/handlers/main.yml  Normal file
@@ -0,0 +1,13 @@
---
- name: Reboot containers
  block:
    - name: Get container ids from filtered files
      ansible.builtin.set_fact:
        proxmox_lxc_filtered_ids: >-
          {{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
      listen: reboot containers
    - name: Reboot container
      ansible.builtin.command: pct reboot {{ item }}
      loop: "{{ proxmox_lxc_filtered_ids }}"
      changed_when: true
      listen: reboot containers
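To make the filter chain in this handler concrete, a worked example of what the map pipeline produces (the paths are illustrative):

# proxmox_lxc_filtered_files: ['/etc/pve/lxc/200.conf', '/etc/pve/lxc/201.conf']
# map("split", "/") | map("last")   -> ['200.conf', '201.conf']
# map("split", ".") | map("first")  -> ['200', '201']
# so the handler runs once per container id: pct reboot 200, pct reboot 201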
9  roles/proxmox_lxc/meta/main.yml  Normal file
@@ -0,0 +1,9 @@
---
argument_specs:
  main:
    short_description: Proxmox LXC settings
    options:
      proxmox_lxc_ct_ids:
        description: Proxmox container ID list
        type: list
        required: true
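A sketch of how the required proxmox_lxc_ct_ids option might be supplied for the Proxmox host group (the IDs are illustrative assumptions):

# hypothetical group_vars/proxmox.yml excerpt
proxmox_lxc_ct_ids:
  - 200
  - 201
  - 202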
43  roles/proxmox_lxc/tasks/main.yml  Normal file
@@ -0,0 +1,43 @@
---
- name: Check for container files that exist on this host
  ansible.builtin.stat:
    path: /etc/pve/lxc/{{ item }}.conf
  loop: "{{ proxmox_lxc_ct_ids }}"
  register: stat_results

- name: Filter out files that do not exist
  ansible.builtin.set_fact:
    proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}' # noqa yaml[line-length]

# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: ^lxc.apparmor.profile
    line: "lxc.apparmor.profile: unconfined"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right cgroup
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: ^lxc.cgroup.devices.allow
    line: "lxc.cgroup.devices.allow: a"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right cap drop
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: ^lxc.cap.drop
    line: "lxc.cap.drop: "
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right mounts
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: ^lxc.mount.auto
    line: 'lxc.mount.auto: "proc:rw sys:rw"'
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers
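After these tasks run, each matched /etc/pve/lxc/<id>.conf carries exactly the line: values above, i.e.:

lxc.apparmor.profile: unconfined
lxc.cgroup.devices.allow: a
lxc.cap.drop: 
lxc.mount.auto: "proc:rw sys:rw"

which relaxes the container's confinement enough for k3s to run, and any change to these lines notifies the reboot containers handler.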
Some files were not shown because too many files have changed in this diff.