Compare commits


79 Commits

Author SHA1 Message Date
Timothy Stewart
ca79207dc9 chore(ci): triggering build 2023-12-09 22:22:19 -06:00
Timothy Stewart
af7f41379b fix(CI): testing macos-13 2023-11-01 12:05:58 -05:00
Timothy Stewart
69c8d28c36 fix(CI): testing macos-latest 2023-11-01 11:29:32 -05:00
Timothy Stewart
723f07caf1 fix(CI): testing ubuntu 2023-11-01 11:24:06 -05:00
Timothy Stewart
9e9b862334 fix(CI): Remove vagrant box cache 2023-10-30 16:00:55 -05:00
Timothy Stewart
df99f988c5 fix(CI): Break up workflows and use templates 2023-10-29 15:29:25 -05:00
Timothy Stewart
71821b2be3 fix(CI): Break up workflows and use templates 2023-10-29 15:22:05 -05:00
Timothy Stewart
4066ca7b6c fix(CI): Break up workflows and use templates 2023-10-29 15:03:05 -05:00
Timothy Stewart
6ca0dcecae fix(CI): Break up workflows and use templates 2023-10-29 15:00:37 -05:00
Timothy Stewart
9593ae69ef fix(CI): Break up workflows and use templates 2023-10-29 14:57:58 -05:00
Timothy Stewart
926b245281 fix(CI): Break up workflows and use templates 2023-10-29 14:57:10 -05:00
Timothy Stewart
f89eb5ec07 fix(CI): Break up workflows and use templates 2023-10-29 14:55:40 -05:00
Timothy Stewart
d767e6add0 fix(CI): Break up workflows and use templates 2023-10-29 14:40:50 -05:00
Timothy Stewart
0cb638faf1 fix(CI): Break up workflows and use templates 2023-10-29 14:39:50 -05:00
Timothy Stewart
0c3d5e9a29 fix(CI): Break up workflows and use templates 2023-10-29 14:37:19 -05:00
Timothy Stewart
841e924449 fix(CI): Break up workflows and use templates 2023-10-29 14:32:27 -05:00
Timothy Stewart
22c1bccd13 fix(CI): Break up workflows and use templates 2023-10-29 14:30:22 -05:00
Timothy Stewart
26aa538e5f fix(CI): Break up workflows and use templates 2023-10-29 14:28:36 -05:00
Timothy Stewart
06d4773e42 fix(CI): Break up workflows and use templates 2023-10-29 14:27:51 -05:00
Timothy Stewart
c6a20c7f14 fix(CI): Break up workflows and use templates 2023-10-29 14:27:20 -05:00
Timothy Stewart
856015d127 fix(CI): Break up workflows and use templates 2023-10-29 14:26:53 -05:00
Timothy Stewart
8637027635 fix(CI): Break up workflows and use templates 2023-10-29 14:19:14 -05:00
Timothy Stewart
207b8f6f88 fix(CI): Break up workflows and use templates 2023-10-29 14:16:09 -05:00
Timothy Stewart
4d6dc254f3 fix(CI): Break up workflows and use templates 2023-10-29 14:11:48 -05:00
Timothy Stewart
f54f180997 fix(CI): Break up workflows and use templates 2023-10-29 14:10:04 -05:00
Timothy Stewart
c9a96de15b fix(CI): Break up workflows and use templates 2023-10-29 14:04:29 -05:00
Timothy Stewart
7404865bd7 fix(CI): Break up workflows and use templates 2023-10-29 14:01:43 -05:00
Timothy Stewart
015e91bca9 fix(CI): Break up workflows and use templates 2023-10-29 13:58:32 -05:00
Timothy Stewart
4450075425 fix(CI): Break up workflows and use templates 2023-10-29 13:56:11 -05:00
Timothy Stewart
fc88777e16 fix(CI): Break up workflows and use templates 2023-10-29 13:55:34 -05:00
Timothy Stewart
2884d54e6b fix(CI): Break up workflows and use templates 2023-10-29 13:55:09 -05:00
Timothy Stewart
0874e56045 fix(CI): Break up workflows and use templates 2023-10-29 13:53:58 -05:00
Timothy Stewart
4bb58181e8 fix(CI): Break up workflows and use templates 2023-10-29 13:47:52 -05:00
Timothy Stewart
f52a556109 fix(CI): Break up workflows and use templates 2023-10-29 13:44:22 -05:00
Timothy Stewart
90fe88379c fix(CI): Break up workflows and use templates 2023-10-29 13:43:22 -05:00
Timothy Stewart
3ff7547f8c fix(CI): Break up workflows and use templates 2023-10-29 13:42:08 -05:00
Timothy Stewart
edd2ba29ce fix(CI): Break up workflows and use templates 2023-10-29 13:41:31 -05:00
Timothy Stewart
4174e81e68 fix(CI): Break up workflows and use templates 2023-10-29 13:40:45 -05:00
Timothy Stewart
8c8ac8b942 fix(CI): Break up workflows and use templates 2023-10-29 13:40:22 -05:00
Timothy Stewart
eb10968a75 fix(CI): Break up workflows and use templates 2023-10-29 13:38:40 -05:00
Timothy Stewart
96b2d243ea fix(CI): Break up workflows and use templates 2023-10-29 13:38:09 -05:00
Timothy Stewart
2490fecb69 fix(CI): Break up workflows and use templates 2023-10-29 13:37:25 -05:00
Timothy Stewart
80fc191c20 fix(CI): Break up workflows and use templates 2023-10-29 13:36:36 -05:00
Timothy Stewart
bc6aa32198 fix(CI): Break up workflows and use templates 2023-10-29 13:35:35 -05:00
Timothy Stewart
85f7ec6889 fix(CI): Break up workflows and use templates 2023-10-29 13:33:44 -05:00
Timothy Stewart
d90fde7b3b fix(CI): Break up workflows and use templates 2023-10-29 13:33:16 -05:00
Timothy Stewart
756c140bee fix(CI): Break up workflows and use templates 2023-10-29 13:32:34 -05:00
Timothy Stewart
85c1826985 fix(CI): Break up workflows and use templates 2023-10-29 13:31:34 -05:00
Timothy Stewart
ad672b3965 fix(CI): Break up workflows and use templates 2023-10-29 13:30:20 -05:00
Timothy Stewart
f19ff12057 fix(CI): Break up workflows and use templates 2023-10-29 13:29:28 -05:00
Timothy Stewart
173e104663 fix(CI): Break up workflows and use templates 2023-10-29 13:27:13 -05:00
Timothy Stewart
1e06289dca fix(CI): Break up workflows and use templates 2023-10-29 13:26:35 -05:00
Timothy Stewart
c2a3b9f56f fix(CI): Break up workflows and use templates 2023-10-29 13:20:32 -05:00
Timothy Stewart
14aa2f8383 fix(CI): Break up workflows and use templates 2023-10-29 13:20:01 -05:00
Timothy Stewart
fb6d94362c fix(CI): Break up workflows and use templates 2023-10-29 13:18:09 -05:00
Timothy Stewart
9483f18ae2 fix(CI): Break up workflows and use templates 2023-10-29 13:16:01 -05:00
Timothy Stewart
2b4ef14b49 fix(CI): Break up workflows and use templates 2023-10-29 13:15:25 -05:00
Timothy Stewart
32b9bfa44f fix(CI): Break up workflows and use templates 2023-10-29 13:11:55 -05:00
Timothy Stewart
a7d4f88b79 fix(CI): Break up workflows and use templates 2023-10-29 13:09:41 -05:00
Balázs Hasprai
e880f08d26 Add option for install behind http_proxy (#384)
* Add option for install behind http_proxy

* Tidy up http_proxy usage
2023-10-21 00:18:36 +00:00
Balázs Hasprai
95b2836dfc Add option to disable MetalLB, for use w/ ext LBs (#383)
* Add option to disable MetalLB, for use w/ ext LBs

* Add option to disable MetalLB, for use w/ ext LBs - add defaults

* Skip MetalLB with tags instead of flag
2023-10-18 22:07:07 +00:00
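Because the final revision gates the MetalLB tasks with "tags: metallb" (see the role diffs below) rather than a boolean flag, an external load balancer setup simply skips those tasks at run time. A minimal usage sketch, assuming the repository's site.yml entry point and its conventional inventory path:

    # deploy the cluster without MetalLB, e.g. when an external LB is used
    ansible-playbook site.yml -i inventory/my-cluster/hosts.ini --skip-tags metallb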
balazshasprai
505c2eeff2 Add option for custom registries / mirrors (#382) 2023-10-18 03:33:30 +00:00
balazshasprai
9b6d551dd6 Expand secure_path with support for Suse (#381) 2023-10-13 04:14:47 +00:00
dependabot[bot]
a64e882fb7 chore(deps): bump pre-commit-hooks from 4.4.0 to 4.5.0 (#379)
Bumps [pre-commit-hooks](https://github.com/pre-commit/pre-commit-hooks) from 4.4.0 to 4.5.0.
- [Release notes](https://github.com/pre-commit/pre-commit-hooks/releases)
- [Changelog](https://github.com/pre-commit/pre-commit-hooks/blob/main/CHANGELOG.md)
- [Commits](https://github.com/pre-commit/pre-commit-hooks/compare/v4.4.0...v4.5.0)

---
updated-dependencies:
- dependency-name: pre-commit-hooks
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-09 15:39:23 +00:00
johnnyrun
38e773315b sysctl tags (#373)
* sysctl tags

* lost tag

---------

Co-authored-by: Gianni <gianni@chainlabo.com>
Co-authored-by: Gianni Carabelli <gianni.carabelli@skytv.it>
2023-10-09 10:00:31 -05:00
dependabot[bot]
70ddf7b63c chore(deps): bump netaddr from 0.8.0 to 0.9.0 (#365)
Bumps [netaddr](https://github.com/drkjam/netaddr) from 0.8.0 to 0.9.0.
- [Changelog](https://github.com/netaddr/netaddr/blob/master/CHANGELOG)
- [Commits](https://github.com/drkjam/netaddr/commits)

---
updated-dependencies:
- dependency-name: netaddr
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-21 12:39:15 -05:00
dependabot[bot]
fb3128a783 chore(deps): bump ansible-core from 2.15.3 to 2.15.4 (#362)
Bumps [ansible-core](https://github.com/ansible/ansible) from 2.15.3 to 2.15.4.
- [Release notes](https://github.com/ansible/ansible/releases)
- [Commits](https://github.com/ansible/ansible/compare/v2.15.3...v2.15.4)

---
updated-dependencies:
- dependency-name: ansible-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-14 13:48:59 -05:00
Techno Tim
2e318e0862 feat(k3s): Updated to v1.25.12+k3s1 (#351) 2023-08-18 08:59:08 -05:00
dependabot[bot]
0607eb8aa4 chore(deps): bump ansible-core from 2.15.2 to 2.15.3 (#349)
Bumps [ansible-core](https://github.com/ansible/ansible) from 2.15.2 to 2.15.3.
- [Release notes](https://github.com/ansible/ansible/releases)
- [Commits](https://github.com/ansible/ansible/compare/v2.15.2...v2.15.3)

---
updated-dependencies:
- dependency-name: ansible-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-16 13:27:35 -05:00
Marek Pilch
a9904d1562 fixes: ERROR! The requested handler <'Reboot containers' / 'Reboot se… (#348)
* fixes: ERROR! The requested handler <'Reboot containers' / 'Reboot server' / 'Reboot>' was not found in either the main handlers list nor in the listening handlers list

* Update main.yml
2023-08-14 17:37:20 -05:00
Techno Tim
9707bc8a58 fix(docs): updated kube-vip url (#341) 2023-08-14 17:30:42 +00:00
Phil Bolduc
e635bd2626 Change reboot.sh to be executable (#344)
Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2023-08-07 11:29:03 -05:00
dependabot[bot]
1aabb5a927 chore(deps): bump jsonpatch from 1.32 to 1.33 (#318) 2023-07-23 19:32:01 +00:00
Christian Berendt
215690b55b Replace hardcoded 'master' group name with 'group_name_master' variable (#337)
For improved flexibility and maintainability.

* Update tasks in node role to use 'group_name_master' variable instead
  of hardcoded 'master' group name
* Update tasks in master role to use 'group_name_master' variable instead
  of hardcoded 'master' group name
* Update tasks in post role to use 'group_name_master' variable instead of
  hardcoded 'master' group name

Signed-off-by: Christian Berendt <berendt@23technologies.cloud>
2023-07-21 16:37:57 -05:00
Simon Leiner
bd44a9b126 Remove unused variable metal_lb_frr_tag_version (#331) 2023-07-21 05:06:04 +00:00
dependabot[bot]
8d61fe81e5 chore(deps): bump pyyaml from 6.0 to 6.0.1 (#334) 2023-07-20 23:20:55 -05:00
dependabot[bot]
c0ff304f22 chore(deps): bump ansible-core from 2.14.5 to 2.15.2 (#335)
Bumps [ansible-core](https://github.com/ansible/ansible) from 2.14.5 to 2.15.2.
- [Release notes](https://github.com/ansible/ansible/releases)
- [Commits](https://github.com/ansible/ansible/compare/v2.14.5...v2.15.2)

---
updated-dependencies:
- dependency-name: ansible-core
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-20 21:54:40 -05:00
Techno Tim
83077ecdd1 Fix CI - python version (#338)
* fix(README): Updated docs link

* fix(ci): set PYTHON_VERSION to 3.11
2023-07-20 21:19:53 -05:00
Simon Leiner
33ae0d4970 Fix CI (#332)
* Update pre-commit actions

This was done by running "pre-commit autoupdate --freeze".

* Remove pre-commit only dependencies from requirements.in

Including them in the file would create the illusion that those were the
versions actually used in CI, but they are not. The exact versions are
determined by the pre-commit hooks which are pinned in
.pre-commit-config.yaml.

* Ansible Lint: Fix role-name[path]

* Ansible Lint: Fix name[play]

* Ansible Lint: Fix key-order[task]

* Ansible Lint: Fix jinja[spacing]

* Ansible Lint: Fix no-free-form

* Ansible Lint: Fix var-naming[no-reserved]

* Ansible Lint: Fix yaml[comments]

* Ansible Lint: Fix yaml[line-length]

* Ansible Lint: Fix name[casing]

* Ansible Lint: Fix no-changed-when

* Ansible Lint: Fix fqcn[action]

* Ansible Lint: Fix args[module]

* Improve task naming
2023-07-20 10:50:02 -05:00
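These hook and lint fixes can be reproduced locally with the project's pre-commit setup; a short sketch, assuming pre-commit was installed from requirements.txt:

    # run every hook against the whole tree, as CI does
    pre-commit run --all-files
    # re-pin hook revisions to frozen SHAs, as this PR did
    pre-commit autoupdate --freeze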
57 changed files with 467 additions and 192 deletions


@@ -10,6 +10,12 @@ on:
jobs:
lint:
uses: ./.github/workflows/lint.yml
test:
uses: ./.github/workflows/test.yml
test-default:
uses: ./.github/workflows/test-default.yml
needs: [lint]
test-ipv6:
uses: ./.github/workflows/test-ipv6.yml
needs: [lint, test-default]
test-single-node:
uses: ./.github/workflows/test-single-node.yml
needs: [lint, test-default, test-ipv6]


@@ -5,24 +5,24 @@ on:
jobs:
pre-commit-ci:
name: Pre-Commit
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
env:
PYTHON_VERSION: "3.10"
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
@@ -30,7 +30,7 @@ jobs:
${{ runner.os }}-pip-
- name: Cache Ansible
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.ansible/collections
key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
@@ -59,9 +59,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@f32435541e24cd6a4700a7f52bb2ec59e80603b1 # 2.0.1
with:
allowlist: |
aws-actions/

.github/workflows/test-default.yml (new file, 80 lines)

@@ -0,0 +1,80 @@
---
name: Molecule Default
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- default
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

.github/workflows/test-ipv6.yml (new file, 80 lines)

@@ -0,0 +1,80 @@
---
name: Molecule IPv6
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- ipv6
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force


@@ -1,24 +1,21 @@
---
name: Test
name: Molecule Single Node
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-12
runs-on: macos-13
strategy:
matrix:
scenario:
- default
- ipv6
- single_node
fail-fast: false
env:
PYTHON_VERSION: "3.10"
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
@@ -31,22 +28,13 @@ jobs:
EOF
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Vagrant boxes
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
@@ -54,7 +42,7 @@ jobs:
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
@@ -81,7 +69,7 @@ jobs:
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |


@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0 # v4.3.0
rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git
rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2 # v1.28.0
rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: a058554b9bcf88f12ad09ab9fb93b267a214368f # v6.8.6
rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a # v0.8.0.4
rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
hooks:
- id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9 # v1.3.1
rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: 30d9af95631de0d7cff4e282bde9160d38bb0359 # 0.4.0
rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks:
- id: fix-smartquotes


@@ -4,11 +4,11 @@
This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`.
This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.chipzoller.dev/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.io/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
If you want more context on how this works, see:
📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)
📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
@@ -28,7 +28,7 @@ on processor architecture:
## ✅ System requirements
- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://technotim.live/posts/ansible-automation/).
- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
@@ -101,7 +101,7 @@ scp debian@master_ip:~/.kube/config ~/.kube/config
### 🔨 Testing your cluster
See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
### Troubleshooting


@@ -1,5 +1,5 @@
---
k3s_version: v1.25.9+k3s1
k3s_version: v1.25.12+k3s1
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -55,7 +55,6 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb
metal_lb_frr_tag_version: "v7.5.1"
metal_lb_speaker_tag_version: "v0.13.9"
metal_lb_controller_tag_version: "v0.13.9"
@@ -82,3 +81,49 @@ proxmox_lxc_ct_ids:
- 202
- 203
- 204
# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
# (harbor / nexus / docker's official registry / etc).
# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
# or air-gapped environments where your nodes don't have internet access after the initial setup
# (which is still needed for downloading the k3s binary and such).
# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
custom_registries: false
# The registries can be authenticated or anonymous, depending on your registry server configuration.
# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
# configs:
# "registry.domain.com":
# auth:
# username: yourusername
# password: yourpassword
# The following is an example that pulls all images used in this playbook through your private registries.
# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
# in your deployments.
# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
# you can just remove those from the mirrors: section.
custom_registries_yaml: |
mirrors:
docker.io:
endpoint:
- "https://registry.domain.com/v2/dockerhub"
quay.io:
endpoint:
- "https://registry.domain.com/v2/quayio"
ghcr.io:
endpoint:
- "https://registry.domain.com/v2/ghcrio"
registry.domain.com:
endpoint:
- "https://registry.domain.com"
configs:
"registry.domain.com":
auth:
username: yourusername
password: yourpassword
# Only enable and configure these if you access the internet through a proxy
# proxy_env:
# HTTP_PROXY: "http://proxy.domain.local:3128"
# HTTPS_PROXY: "http://proxy.domain.local:3128"
# NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
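A hypothetical minimal enablement, keeping the placeholder mirror host used above (registry.domain.com stands in for your own registry):

    # group_vars/all.yml (sketch)
    custom_registries: true
    # custom_registries_yaml: see the mirrors/configs example above;
    # after the next playbook run each node gets /etc/rancher/k3s/registries.yaml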


@@ -4,7 +4,8 @@
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:


@@ -4,7 +4,8 @@
tasks:
- name: Override host variables (1/2)
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be


@@ -2,4 +2,4 @@
- name: Verify
hosts: all
roles:
- verify/from_outside
- verify_from_outside


@@ -6,4 +6,4 @@ outside_host: localhost
testing_namespace: molecule-verify-from-outside
# The directory in which the example manifests reside
example_manifests_path: ../../../../example
example_manifests_path: ../../../example


@@ -34,14 +34,14 @@
- name: Assert that the nginx welcome page is available
ansible.builtin.uri:
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
return_content: yes
register: result
failed_when: "'Welcome to nginx!' not in result.content"
vars:
ip: >-
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
port: >-
port_: >-
{{ nginx_services.resources[0].spec.ports[0].port }}
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
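For context, ansible.utils.ipwrap is what keeps this URL valid in both the IPv4 and IPv6 scenarios: it brackets IPv6 literals and passes anything else through unchanged. Roughly:

    # "192.168.30.80"    -> 192.168.30.80       (unchanged)
    # "fdad:bad:ba55::1" -> [fdad:bad:ba55::1]
    # so the rendered URL becomes e.g. http://[fdad:bad:ba55::1]:80/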


@@ -4,7 +4,8 @@
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:

reboot.sh (mode changed: Normal file → Executable file)

@@ -1,5 +1,4 @@
ansible-core>=2.13.5
ansible-lint>=6.8.6
jmespath>=1.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
@@ -9,4 +8,3 @@ netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0
yamllint>=1.28.0


@@ -1,28 +1,21 @@
#
# This file is autogenerated by pip-compile with python 3.8
# To update, run:
# This file is autogenerated by pip-compile with Python 3.11
# by the following command:
#
# pip-compile requirements.in
#
ansible-compat==3.0.1
# via molecule
ansible-core==2.14.5
ansible-core==2.15.4
# via
# -r requirements.in
# ansible-compat
# ansible-lint
ansible-lint==6.15.0
# via -r requirements.in
arrow==1.2.3
# via jinja2-time
attrs==22.1.0
# via jsonschema
binaryornot==0.4.4
# via cookiecutter
black==22.10.0
# via ansible-lint
bracex==2.3.post1
# via wcmatch
cachetools==5.2.0
# via google-auth
certifi==2022.9.24
@@ -39,7 +32,6 @@ charset-normalizer==2.1.1
# via requests
click==8.1.3
# via
# black
# click-help-colors
# cookiecutter
# molecule
@@ -58,9 +50,7 @@ distro==1.8.0
enrich==1.2.7
# via molecule
filelock==3.8.0
# via
# ansible-lint
# virtualenv
# via virtualenv
google-auth==2.14.0
# via kubernetes
identify==2.5.8
@@ -78,14 +68,13 @@ jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1
# via -r requirements.in
jsonpatch==1.32
jsonpatch==1.33
# via -r requirements.in
jsonpointer==2.3
# via jsonpatch
jsonschema==4.17.0
# via
# ansible-compat
# ansible-lint
# molecule
kubernetes==25.3.0
# via -r requirements.in
@@ -97,9 +86,7 @@ molecule==4.0.4
# molecule-vagrant
molecule-vagrant==1.0.0
# via -r requirements.in
mypy-extensions==0.4.3
# via black
netaddr==0.8.0
netaddr==0.9.0
# via -r requirements.in
nodeenv==1.7.0
# via pre-commit
@@ -109,21 +96,14 @@ packaging==21.3
# via
# ansible-compat
# ansible-core
# ansible-lint
# molecule
pathspec==0.10.1
# via
# black
# yamllint
platformdirs==2.5.2
# via
# black
# virtualenv
# via virtualenv
pluggy==1.0.0
# via molecule
pre-commit==2.21.0
# via -r requirements.in
pre-commit-hooks==4.4.0
pre-commit-hooks==4.5.0
# via -r requirements.in
pyasn1==0.4.8
# via
@@ -147,18 +127,16 @@ python-slugify==6.1.2
# via cookiecutter
python-vagrant==1.0.0
# via molecule-vagrant
pyyaml==6.0
pyyaml==6.0.1
# via
# -r requirements.in
# ansible-compat
# ansible-core
# ansible-lint
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
# yamllint
requests==2.28.1
# via
# cookiecutter
@@ -170,15 +148,12 @@ resolvelib==0.8.1
# via ansible-core
rich==12.6.0
# via
# ansible-lint
# enrich
# molecule
rsa==4.9
# via google-auth
ruamel-yaml==0.17.21
# via
# ansible-lint
# pre-commit-hooks
# via pre-commit-hooks
selinux==0.2.1
# via molecule-vagrant
six==1.16.0
@@ -187,9 +162,7 @@ six==1.16.0
# kubernetes
# python-dateutil
subprocess-tee==0.4.1
# via
# ansible-compat
# ansible-lint
# via ansible-compat
text-unidecode==1.3
# via python-slugify
urllib3==1.26.12
@@ -198,14 +171,8 @@ urllib3==1.26.12
# requests
virtualenv==20.16.6
# via pre-commit
wcmatch==8.4.1
# via ansible-lint
websocket-client==1.4.2
# via kubernetes
yamllint==1.31.0
# via
# -r requirements.in
# ansible-lint
# The following packages are considered to be unsafe in a requirements file:
# setuptools


@@ -1,6 +1,6 @@
---
- hosts: k3s_cluster
- name: Reset k3s cluster
hosts: k3s_cluster
gather_facts: yes
roles:
- role: reset
@@ -14,7 +14,8 @@
reboot:
reboot_timeout: 3600
- hosts: proxmox
- name: Revert changes to Proxmox cluster
hosts: proxmox
gather_facts: true
become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}"


@@ -1,16 +0,0 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null
server_init_args: >-
{% if groups['master'] | length > 1 %}
{% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}


@@ -0,0 +1,3 @@
---
# Name of the master group
group_name_master: master
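A hypothetical sketch of how this default is meant to be overridden (group and host names below are invented; note that site.yml in this revision still targets hosts: master directly, so a full rename would also mean updating those play headers):

    # inventory/my-cluster/hosts.ini (hypothetical)
    [controlplane]
    192.168.30.38
    192.168.30.39

    # group_vars/all.yml
    group_name_master: controlplane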


@@ -0,0 +1,18 @@
---
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
mode: '0755'
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'


@@ -1,5 +1,9 @@
---
- name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml
when: proxy_env is defined
- name: Copy K3s service file
template:
src: "k3s.service.j2"


@@ -0,0 +1,4 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}
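Rendered with the commented proxy_env placeholders from group_vars above, this drop-in would come out roughly as:

    [Service]
    Environment=HTTP_PROXY=http://proxy.domain.local:3128
    Environment=HTTPS_PROXY=http://proxy.domain.local:3128
    Environment=NO_PROXY=*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16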


@@ -7,7 +7,7 @@ After=network-online.target
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead


@@ -0,0 +1,6 @@
---
# Indicates whether custom registries for k3s should be configured
# Possible values:
# - present
# - absent
state: present


@@ -0,0 +1,17 @@
---
- name: Create directory /etc/rancher/k3s
file:
path: "/etc/{{ item }}"
state: directory
mode: '0755'
loop:
- rancher
- rancher/k3s
- name: Insert registries into /etc/rancher/k3s/registries.yaml
blockinfile:
path: /etc/rancher/k3s/registries.yaml
block: "{{ custom_registries_yaml }}"
mode: '0600'
create: true
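Since blockinfile fences what it inserts between marker comments, the resulting /etc/rancher/k3s/registries.yaml would look roughly like this (showing only the docker.io mirror from the placeholder config):

    # BEGIN ANSIBLE MANAGED BLOCK
    mirrors:
      docker.io:
        endpoint:
          - "https://registry.domain.com/v2/dockerhub"
    # END ANSIBLE MANAGED BLOCK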


@@ -0,0 +1,20 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null
# Name of the master group
group_name_master: master
# yamllint disable rule:line-length
server_init_args: >-
{% if groups[group_name_master | default('master')] | length > 1 %}
{% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}
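For orientation, a sketch of what server_init_args evaluates to in a cluster with more than one master (the IP is a placeholder; on a single-master cluster only extra_server_args remains):

    # on the first master (its hostname matches groups['master'][0]):
    --cluster-init --token <k3s_token> <extra_server_args>
    # on every other master:
    --server https://192.168.30.38:6443 --token <k3s_token> <extra_server_args>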


@@ -0,0 +1,18 @@
---
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
mode: '0755'
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'


@@ -1,23 +1,27 @@
---
- name: Clean previous runs of k3s-init
- name: Stop k3s-init
systemd:
name: k3s-init
state: stopped
failed_when: false
- name: Clean previous runs of k3s-init
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command.
command: systemctl reset-failed k3s-init
failed_when: false
changed_when: false
args:
warn: false # The ansible systemd module does not support reset-failed
- name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml
when: proxy_env is defined
- name: Deploy vip manifest
include_tasks: vip.yml
- name: Deploy metallb manifest
include_tasks: metallb.yml
tags: metallb
- name: Init cluster inside the transient k3s-init service
command:
@@ -28,12 +32,13 @@
creates: "{{ systemd_dir }}/k3s.service"
- name: Verification
when: not ansible_check_mode
block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails)
command:
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
register: nodes
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
retries: "{{ retry_count | default(20) }}"
delay: 10
changed_when: false
@@ -49,7 +54,6 @@
name: k3s-init
state: stopped
failed_when: false
when: not ansible_check_mode
- name: Copy K3s service file
register: k3s_service


@@ -6,16 +6,16 @@
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml" # noqa yaml[line-length]
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
ansible.builtin.replace:
@@ -27,4 +27,4 @@
to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
loop_control:
label: "{{ item.change }} => {{ item.to }}"
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']


@@ -6,7 +6,7 @@
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip rbac manifest to first master
ansible.builtin.get_url:
@@ -15,7 +15,7 @@
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy vip manifest to first master
template:
@@ -24,4 +24,4 @@
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']


@@ -0,0 +1,4 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}


@@ -1,3 +1,6 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s
# Name of the master group
group_name_master: master


@@ -1,6 +1,7 @@
---
- name: Deploy metallb pool
include_tasks: metallb.yml
tags: metallb
- name: Remove tmp directory used for manifests
file:


@@ -5,7 +5,7 @@
state: directory
owner: "{{ ansible_user_id }}"
mode: 0755
with_items: "{{ groups['master'] }}"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
@@ -14,14 +14,14 @@
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user_id }}"
mode: 0755
with_items: "{{ groups['master'] }}"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups['master'] }}"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Wait for MetalLB resources
@@ -66,7 +66,7 @@
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups['master'] }}"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Apply metallb CRs


@@ -1,4 +1,5 @@
---
- name: reboot server
- name: Reboot server
become: true
reboot:
listen: reboot server


@@ -0,0 +1,4 @@
---
secure_path:
RedHat: '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
Suse: '/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin'
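Combined with the lineinfile task later in this diff, a Suse host ends up with roughly this line in /etc/sudoers:

    Defaults secure_path = /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin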


@@ -1,34 +1,37 @@
---
- name: Set same timezone on every Server
timezone:
community.general.timezone:
name: "{{ system_timezone }}"
when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
- name: Set SELinux to disabled state
selinux:
ansible.posix.selinux:
state: disabled
when: ansible_os_family == "RedHat"
- name: Enable IPv4 forwarding
sysctl:
ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
tags: sysctl
- name: Enable IPv6 forwarding
sysctl:
ansible.posix.sysctl:
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
tags: sysctl
- name: Enable IPv6 router advertisements
sysctl:
ansible.posix.sysctl:
name: net.ipv6.conf.all.accept_ra
value: "2"
state: present
reload: yes
tags: sysctl
- name: Add br_netfilter to /etc/modules-load.d/
copy:
@@ -38,13 +41,13 @@
when: ansible_os_family == "RedHat"
- name: Load br_netfilter
modprobe:
community.general.modprobe:
name: br_netfilter
state: present
when: ansible_os_family == "RedHat"
- name: Set bridge-nf-call-iptables (just to be sure)
sysctl:
ansible.posix.sysctl:
name: "{{ item }}"
value: "1"
state: present
@@ -53,13 +56,14 @@
loop:
- net.bridge.bridge-nf-call-iptables
- net.bridge.bridge-nf-call-ip6tables
tags: sysctl
- name: Add /usr/local/bin to sudo secure_path
lineinfile:
line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
line: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'
regexp: "Defaults(\\s)*secure_path(\\s)*="
state: present
insertafter: EOF
path: /etc/sudoers
validate: 'visudo -cf %s'
when: ansible_os_family == "RedHat"
when: ansible_os_family in [ "RedHat", "Suse" ]
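With the new sysctl tags in place, the kernel-tuning tasks can be re-run (or skipped) in isolation; a sketch, assuming the same site.yml entry point and inventory path as above:

    # re-apply only the sysctl settings
    ansible-playbook site.yml -i inventory/my-cluster/hosts.ini --tags sysctl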


@@ -1,5 +1,13 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
- name: Reboot containers
block:
- name: Get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids: >-
{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
listen: reboot containers
- name: Reboot container
command: "pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"
changed_when: true
listen: reboot containers
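To make the filter chain concrete: each matched config path is reduced to its container id (/etc/pve/lxc/201.conf -> 201.conf -> 201). A hypothetical debug task illustrating the same pipeline:

    - name: Show derived container ids (illustration only)
      debug:
        msg: >-
          {{ ['/etc/pve/lxc/201.conf', '/etc/pve/lxc/204.conf']
             | map('split', '/') | map('last')
             | map('split', '.') | map('first') | list }}
      # prints ['201', '204']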


@@ -1,21 +1,15 @@
---
- name: check for container files that exist on this host
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
lineinfile:


@@ -1,3 +1,4 @@
---
- name: reboot
- name: Reboot
reboot:
listen: reboot


@@ -47,20 +47,16 @@
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: execute OS related tasks on the Raspberry Pi - {{ action }}
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
with_first_found:
- "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action }}/{{ detected_distribution }}.yml"
- "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ action }}/{{ ansible_distribution }}.yml"
- "{{ action }}/default.yml"
- "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action_ }}/{{ detected_distribution }}.yml"
- "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ action_ }}/{{ ansible_distribution }}.yml"
- "{{ action_ }}/default.yml"
vars:
action: >-
{% if state == "present" -%}
setup
{%- else -%}
teardown
{%- endif %}
action_: >-
{% if state == "present" %}setup{% else %}teardown{% endif %}
when:
- raspberry_pi|default(false)


@@ -8,20 +8,22 @@
notify: reboot
- name: Install iptables
apt: name=iptables state=present
apt:
name: iptables
state: present
- name: Flush iptables before changing to iptables-legacy
iptables:
flush: true
- name: Changing to iptables-legacy
alternatives:
community.general.alternatives:
path: /usr/sbin/iptables-legacy
name: iptables
register: ip4_legacy
- name: Changing to ip6tables-legacy
alternatives:
community.general.alternatives:
path: /usr/sbin/ip6tables-legacy
name: ip6tables
register: ip6_legacy


@@ -46,6 +46,15 @@
- /var/lib/rancher/
- /var/lib/cni/
- name: Remove K3s http_proxy files
file:
name: "{{ item }}"
state: absent
with_items:
- "{{ systemd_dir }}/k3s.service.d"
- "{{ systemd_dir }}/k3s-node.service.d"
when: proxy_env is defined
- name: Reload daemon_reload
systemd:
daemon_reload: yes


@@ -9,7 +9,7 @@
check_mode: false
- name: Umount filesystem
mount:
ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items:


@@ -1,5 +0,0 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"


@@ -0,0 +1 @@
../../proxmox_lxc/handlers/main.yml


@@ -1,21 +1,15 @@
---
- name: check for container files that exist on this host
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
- name: Remove LXC apparmor profile
lineinfile:
dest: "{{ item }}"


@@ -1,14 +1,17 @@
---
- hosts: proxmox
- name: Prepare Proxmox cluster
hosts: proxmox
gather_facts: true
become: yes
environment: "{{ proxy_env | default({}) }}"
roles:
- role: proxmox_lxc
when: proxmox_lxc_configure
- hosts: k3s_cluster
- name: Prepare k3s nodes
hosts: k3s_cluster
gather_facts: yes
environment: "{{ proxy_env | default({}) }}"
roles:
- role: lxc
become: true
@@ -19,18 +22,27 @@
become: true
- role: raspberrypi
become: true
- role: k3s_custom_registries
become: true
when: custom_registries
- hosts: master
- name: Setup k3s servers
hosts: master
environment: "{{ proxy_env | default({}) }}"
roles:
- role: k3s/master
- role: k3s_server
become: true
- hosts: node
- name: Setup k3s agents
hosts: node
environment: "{{ proxy_env | default({}) }}"
roles:
- role: k3s/node
- role: k3s_agent
become: true
- hosts: master
- name: Configure k3s cluster
hosts: master
environment: "{{ proxy_env | default({}) }}"
roles:
- role: k3s/post
- role: k3s_server_post
become: true