Compare commits


90 Commits

Author SHA1 Message Date
Timothy Stewart
ca79207dc9 chore(ci): triggering build 2023-12-09 22:22:19 -06:00
Timothy Stewart
af7f41379b fix(CI): testing macos-13 2023-11-01 12:05:58 -05:00
Timothy Stewart
69c8d28c36 fix(CI): testing macos-latest 2023-11-01 11:29:32 -05:00
Timothy Stewart
723f07caf1 fix(CI): testing ubuntu 2023-11-01 11:24:06 -05:00
Timothy Stewart
9e9b862334 fix(CI): Remove vagrant box cache 2023-10-30 16:00:55 -05:00
Timothy Stewart
df99f988c5 fix(CI): Break up workflows and use templates 2023-10-29 15:29:25 -05:00
Timothy Stewart
71821b2be3 fix(CI): Break up workflows and use templates 2023-10-29 15:22:05 -05:00
Timothy Stewart
4066ca7b6c fix(CI): Break up workflows and use templates 2023-10-29 15:03:05 -05:00
Timothy Stewart
6ca0dcecae fix(CI): Break up workflows and use templates 2023-10-29 15:00:37 -05:00
Timothy Stewart
9593ae69ef fix(CI): Break up workflows and use templates 2023-10-29 14:57:58 -05:00
Timothy Stewart
926b245281 fix(CI): Break up workflows and use templates 2023-10-29 14:57:10 -05:00
Timothy Stewart
f89eb5ec07 fix(CI): Break up workflows and use templates 2023-10-29 14:55:40 -05:00
Timothy Stewart
d767e6add0 fix(CI): Break up workflows and use templates 2023-10-29 14:40:50 -05:00
Timothy Stewart
0cb638faf1 fix(CI): Break up workflows and use templates 2023-10-29 14:39:50 -05:00
Timothy Stewart
0c3d5e9a29 fix(CI): Break up workflows and use templates 2023-10-29 14:37:19 -05:00
Timothy Stewart
841e924449 fix(CI): Break up workflows and use templates 2023-10-29 14:32:27 -05:00
Timothy Stewart
22c1bccd13 fix(CI): Break up workflows and use templates 2023-10-29 14:30:22 -05:00
Timothy Stewart
26aa538e5f fix(CI): Break up workflows and use templates 2023-10-29 14:28:36 -05:00
Timothy Stewart
06d4773e42 fix(CI): Break up workflows and use templates 2023-10-29 14:27:51 -05:00
Timothy Stewart
c6a20c7f14 fix(CI): Break up workflows and use templates 2023-10-29 14:27:20 -05:00
Timothy Stewart
856015d127 fix(CI): Break up workflows and use templates 2023-10-29 14:26:53 -05:00
Timothy Stewart
8637027635 fix(CI): Break up workflows and use templates 2023-10-29 14:19:14 -05:00
Timothy Stewart
207b8f6f88 fix(CI): Break up workflows and use templates 2023-10-29 14:16:09 -05:00
Timothy Stewart
4d6dc254f3 fix(CI): Break up workflows and use templates 2023-10-29 14:11:48 -05:00
Timothy Stewart
f54f180997 fix(CI): Break up workflows and use templates 2023-10-29 14:10:04 -05:00
Timothy Stewart
c9a96de15b fix(CI): Break up workflows and use templates 2023-10-29 14:04:29 -05:00
Timothy Stewart
7404865bd7 fix(CI): Break up workflows and use templates 2023-10-29 14:01:43 -05:00
Timothy Stewart
015e91bca9 fix(CI): Break up workflows and use templates 2023-10-29 13:58:32 -05:00
Timothy Stewart
4450075425 fix(CI): Break up workflows and use templates 2023-10-29 13:56:11 -05:00
Timothy Stewart
fc88777e16 fix(CI): Break up workflows and use templates 2023-10-29 13:55:34 -05:00
Timothy Stewart
2884d54e6b fix(CI): Break up workflows and use templates 2023-10-29 13:55:09 -05:00
Timothy Stewart
0874e56045 fix(CI): Break up workflows and use templates 2023-10-29 13:53:58 -05:00
Timothy Stewart
4bb58181e8 fix(CI): Break up workflows and use templates 2023-10-29 13:47:52 -05:00
Timothy Stewart
f52a556109 fix(CI): Break up workflows and use templates 2023-10-29 13:44:22 -05:00
Timothy Stewart
90fe88379c fix(CI): Break up workflows and use templates 2023-10-29 13:43:22 -05:00
Timothy Stewart
3ff7547f8c fix(CI): Break up workflows and use templates 2023-10-29 13:42:08 -05:00
Timothy Stewart
edd2ba29ce fix(CI): Break up workflows and use templates 2023-10-29 13:41:31 -05:00
Timothy Stewart
4174e81e68 fix(CI): Break up workflows and use templates 2023-10-29 13:40:45 -05:00
Timothy Stewart
8c8ac8b942 fix(CI): Break up workflows and use templates 2023-10-29 13:40:22 -05:00
Timothy Stewart
eb10968a75 fix(CI): Break up workflows and use templates 2023-10-29 13:38:40 -05:00
Timothy Stewart
96b2d243ea fix(CI): Break up workflows and use templates 2023-10-29 13:38:09 -05:00
Timothy Stewart
2490fecb69 fix(CI): Break up workflows and use templates 2023-10-29 13:37:25 -05:00
Timothy Stewart
80fc191c20 fix(CI): Break up workflows and use templates 2023-10-29 13:36:36 -05:00
Timothy Stewart
bc6aa32198 fix(CI): Break up workflows and use templates 2023-10-29 13:35:35 -05:00
Timothy Stewart
85f7ec6889 fix(CI): Break up workflows and use templates 2023-10-29 13:33:44 -05:00
Timothy Stewart
d90fde7b3b fix(CI): Break up workflows and use templates 2023-10-29 13:33:16 -05:00
Timothy Stewart
756c140bee fix(CI): Break up workflows and use templates 2023-10-29 13:32:34 -05:00
Timothy Stewart
85c1826985 fix(CI): Break up workflows and use templates 2023-10-29 13:31:34 -05:00
Timothy Stewart
ad672b3965 fix(CI): Break up workflows and use templates 2023-10-29 13:30:20 -05:00
Timothy Stewart
f19ff12057 fix(CI): Break up workflows and use templates 2023-10-29 13:29:28 -05:00
Timothy Stewart
173e104663 fix(CI): Break up workflows and use templates 2023-10-29 13:27:13 -05:00
Timothy Stewart
1e06289dca fix(CI): Break up workflows and use templates 2023-10-29 13:26:35 -05:00
Timothy Stewart
c2a3b9f56f fix(CI): Break up workflows and use templates 2023-10-29 13:20:32 -05:00
Timothy Stewart
14aa2f8383 fix(CI): Break up workflows and use templates 2023-10-29 13:20:01 -05:00
Timothy Stewart
fb6d94362c fix(CI): Break up workflows and use templates 2023-10-29 13:18:09 -05:00
Timothy Stewart
9483f18ae2 fix(CI): Break up workflows and use templates 2023-10-29 13:16:01 -05:00
Timothy Stewart
2b4ef14b49 fix(CI): Break up workflows and use templates 2023-10-29 13:15:25 -05:00
Timothy Stewart
32b9bfa44f fix(CI): Break up workflows and use templates 2023-10-29 13:11:55 -05:00
Timothy Stewart
a7d4f88b79 fix(CI): Break up workflows and use templates 2023-10-29 13:09:41 -05:00
Balázs Hasprai
e880f08d26 Add option for install behind http_proxy (#384)
* Add option for install behind http_proxy

* Tidy up http_proxy usage
2023-10-21 00:18:36 +00:00
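
For context, this change is opt-in: nothing is configured unless proxy_env is defined in the inventory. A minimal sketch based on the commented defaults added to group_vars/all.yml in this change (the proxy host below is a placeholder):

    # inventory group_vars/all.yml -- only needed when nodes reach the internet through a proxy
    proxy_env:
      HTTP_PROXY: "http://proxy.domain.local:3128"
      HTTPS_PROXY: "http://proxy.domain.local:3128"
      NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"

When set, the master and node roles render these values into a systemd drop-in, k3s.service.d/http_proxy.conf, so the k3s service itself picks up the proxy.
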
Balázs Hasprai
95b2836dfc Add option to disable MetalLB, for use w/ ext LBs (#383)
* Add option to disable MetalLB, for use w/ ext LBs

* Add option to disable MetalLB, for use w/ ext LBs - add defaults

* Skip MetalLB with tags instead of flag
2023-10-18 22:07:07 +00:00
balazshasprai
505c2eeff2 Add option for custom registries / mirrors (#382) 2023-10-18 03:33:30 +00:00
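
For orientation, the new custom_registries option simply writes the custom_registries_yaml variable to /etc/rancher/k3s/registries.yaml on each node, following k3s's private-registry format. A minimal anonymous-mirror sketch (registry.domain.com is a placeholder; the full authenticated example is in the group_vars diff further down):

    # value of custom_registries_yaml, rendered into /etc/rancher/k3s/registries.yaml
    mirrors:
      docker.io:
        endpoint:
          - "https://registry.domain.com/v2/dockerhub"
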
balazshasprai
9b6d551dd6 Expand secure_path with support for Suse (#381) 2023-10-13 04:14:47 +00:00
dependabot[bot]
a64e882fb7 chore(deps): bump pre-commit-hooks from 4.4.0 to 4.5.0 (#379)
Bumps [pre-commit-hooks](https://github.com/pre-commit/pre-commit-hooks) from 4.4.0 to 4.5.0.
- [Release notes](https://github.com/pre-commit/pre-commit-hooks/releases)
- [Changelog](https://github.com/pre-commit/pre-commit-hooks/blob/main/CHANGELOG.md)
- [Commits](https://github.com/pre-commit/pre-commit-hooks/compare/v4.4.0...v4.5.0)

---
updated-dependencies:
- dependency-name: pre-commit-hooks
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-09 15:39:23 +00:00
johnnyrun
38e773315b sysctl tags (#373)
* sysctl tags

* lost tag

---------

Co-authored-by: Gianni <gianni@chainlabo.com>
Co-authored-by: Gianni Carabelli <gianni.carabelli@skytv.it>
2023-10-09 10:00:31 -05:00
dependabot[bot]
70ddf7b63c chore(deps): bump netaddr from 0.8.0 to 0.9.0 (#365)
Bumps [netaddr](https://github.com/drkjam/netaddr) from 0.8.0 to 0.9.0.
- [Changelog](https://github.com/netaddr/netaddr/blob/master/CHANGELOG)
- [Commits](https://github.com/drkjam/netaddr/commits)

---
updated-dependencies:
- dependency-name: netaddr
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-21 12:39:15 -05:00
dependabot[bot]
fb3128a783 chore(deps): bump ansible-core from 2.15.3 to 2.15.4 (#362)
Bumps [ansible-core](https://github.com/ansible/ansible) from 2.15.3 to 2.15.4.
- [Release notes](https://github.com/ansible/ansible/releases)
- [Commits](https://github.com/ansible/ansible/compare/v2.15.3...v2.15.4)

---
updated-dependencies:
- dependency-name: ansible-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-14 13:48:59 -05:00
Techno Tim
2e318e0862 feat(k3s): Updated to v1.25.12+k3s1 (#351) 2023-08-18 08:59:08 -05:00
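
The release bump itself is a one-variable change in the inventory defaults (the value below is the one this commit pins; see the group_vars diff further down):

    # inventory group_vars/all.yml
    k3s_version: v1.25.12+k3s1
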
dependabot[bot]
0607eb8aa4 chore(deps): bump ansible-core from 2.15.2 to 2.15.3 (#349)
Bumps [ansible-core](https://github.com/ansible/ansible) from 2.15.2 to 2.15.3.
- [Release notes](https://github.com/ansible/ansible/releases)
- [Commits](https://github.com/ansible/ansible/compare/v2.15.2...v2.15.3)

---
updated-dependencies:
- dependency-name: ansible-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-16 13:27:35 -05:00
Marek Pilch
a9904d1562 fixes: ERROR! The requested handler <'Reboot containers' / 'Reboot se… (#348)
* fixes: ERROR! The requested handler <'Reboot containers' / 'Reboot server' / 'Reboot>' was not found in either the main handlers list nor in the listening handlers list

* Update main.yml
2023-08-14 17:37:20 -05:00
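
The error comes from notify strings and handler names drifting apart after the handlers were renamed to Title Case; the fix keeps lowercase listen: topics on the renamed handlers so either spelling resolves. A minimal sketch of the pattern (the triggering task is illustrative, not the playbook's exact lines):

    handlers:
      - name: Reboot server
        become: true
        reboot:
        listen: reboot server          # lowercase notify strings still resolve via listen

    tasks:
      - name: Apply a change that requires a reboot    # illustrative task
        ansible.builtin.lineinfile:
          path: /etc/example.conf                      # placeholder file
          line: "example=1"
        notify: Reboot server
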
Techno Tim
9707bc8a58 fix(docs): updated kube-vip url (#341) 2023-08-14 17:30:42 +00:00
Phil Bolduc
e635bd2626 Change reboot.sh to be executable (#344)
Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2023-08-07 11:29:03 -05:00
dependabot[bot]
1aabb5a927 chore(deps): bump jsonpatch from 1.32 to 1.33 (#318) 2023-07-23 19:32:01 +00:00
Christian Berendt
215690b55b Replace hardcoded 'master' group name with 'group_name_master' variable (#337)
For improved flexibility and maintainability.

* Update tasks in node role to use 'group_name_master' variable instead
  of hardcoded 'master' group name
* Update tasks in master role to use 'group_name_master' variable instead
  of hardcoded 'master' group name
* Update tasks in post role to use 'group_name_master' variable instead of
  hardcoded 'master' group name

Signed-off-by: Christian Berendt <berendt@23technologies.cloud>
2023-07-21 16:37:57 -05:00
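
In practice this means an inventory whose control-plane group is not literally named "master" can set the variable once; every template falls back to "master" when it is undefined. A hypothetical sketch (the group name below is illustrative):

    # group_vars/all.yml for an inventory whose control-plane group is [control_plane]
    group_name_master: control_plane
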
Simon Leiner
bd44a9b126 Remove unused variable metal_lb_frr_tag_version (#331) 2023-07-21 05:06:04 +00:00
dependabot[bot]
8d61fe81e5 chore(deps): bump pyyaml from 6.0 to 6.0.1 (#334) 2023-07-20 23:20:55 -05:00
dependabot[bot]
c0ff304f22 chore(deps): bump ansible-core from 2.14.5 to 2.15.2 (#335)
Bumps [ansible-core](https://github.com/ansible/ansible) from 2.14.5 to 2.15.2.
- [Release notes](https://github.com/ansible/ansible/releases)
- [Commits](https://github.com/ansible/ansible/compare/v2.14.5...v2.15.2)

---
updated-dependencies:
- dependency-name: ansible-core
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-20 21:54:40 -05:00
Techno Tim
83077ecdd1 Fix CI - python version (#338)
* fix(README): Updated docs link

* fix(ci): set PYTHON_VERSION to 3.11
2023-07-20 21:19:53 -05:00
Simon Leiner
33ae0d4970 Fix CI (#332)
* Update pre-commit actions

This was done by running "pre-commit autoupdate --freeze".

* Remove pre-commit only dependencies from requirements.in

Including them in the file would create the illusion that those were the
versions actually used in CI, but they are not. The exact versions are
determined by the pre-commit hooks which are pinned in
.pre-commit-config.yaml.

* Ansible Lint: Fix role-name[path]

* Ansible Lint: Fix name[play]

* Ansible Lint: Fix key-order[task]

* Ansible Lint: Fix jinja[spacing]

* Ansible Lint: Fix no-free-form

* Ansible Lint: Fix var-naming[no-reserved]

* Ansible Lint: Fix yaml[comments]

* Ansible Lint: Fix yaml[line-length]

* Ansible Lint: Fix name[casing]

* Ansible Lint: Fix no-changed-when

* Ansible Lint: Fix fqcn[action]

* Ansible Lint: Fix args[module]

* Improve task naming
2023-07-20 10:50:02 -05:00
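
Most of these lint fixes follow the same mechanical pattern. As an illustration of the fqcn[action] and name[casing] classes, a before/after sketch loosely based on the prereq role changes shown further down:

    # before: short module name, lowercase task name
    - name: enable ipv4 forwarding
      sysctl:
        name: net.ipv4.ip_forward
        value: "1"
        state: present

    # after: fully qualified collection name, capitalized task name
    - name: Enable IPv4 forwarding
      ansible.posix.sysctl:
        name: net.ipv4.ip_forward
        value: "1"
        state: present
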
Techno Tim
edd4838407 feat(k3s): Updated to v1.25 (#187)
* feat(k3s): Updated to v1.25.4+k3s1

* feat(k3s): Updated to v1.25.5+k3s1

* feat(k3s): Updated to v1.25.7+k3s1

* feat(k3s): Updated to v1.25.8+k3s1

* feat(k3s): Updated to v1.25.9+k3s1

* feat(kube-vip): Update to v0.5.12
2023-04-27 23:09:46 -05:00
dependabot[bot]
5c79ea9b71 chore(deps): bump ansible-core from 2.14.4 to 2.14.5 (#287)
Bumps [ansible-core](https://github.com/ansible/ansible) from 2.14.4 to 2.14.5.
- [Release notes](https://github.com/ansible/ansible/releases)
- [Commits](https://github.com/ansible/ansible/compare/v2.14.4...v2.14.5)

---
updated-dependencies:
- dependency-name: ansible-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-25 14:19:52 -05:00
dependabot[bot]
3d204ad851 chore(deps): bump yamllint from 1.30.0 to 1.31.0 (#284)
Bumps [yamllint](https://github.com/adrienverge/yamllint) from 1.30.0 to 1.31.0.
- [Release notes](https://github.com/adrienverge/yamllint/releases)
- [Changelog](https://github.com/adrienverge/yamllint/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/adrienverge/yamllint/compare/v1.30.0...v1.31.0)

---
updated-dependencies:
- dependency-name: yamllint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2023-04-24 11:17:02 -05:00
dependabot[bot]
13bd868faa chore(deps): bump ansible-lint from 6.14.6 to 6.15.0 (#285)
Bumps [ansible-lint](https://github.com/ansible/ansible-lint) from 6.14.6 to 6.15.0.
- [Release notes](https://github.com/ansible/ansible-lint/releases)
- [Commits](https://github.com/ansible/ansible-lint/compare/v6.14.6...v6.15.0)

---
updated-dependencies:
- dependency-name: ansible-lint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-23 23:10:28 -05:00
dependabot[bot]
c564a8562a chore(deps): bump ansible-lint from 6.14.3 to 6.14.6 (#275)
Bumps [ansible-lint](https://github.com/ansible/ansible-lint) from 6.14.3 to 6.14.6.
- [Release notes](https://github.com/ansible/ansible-lint/releases)
- [Commits](https://github.com/ansible/ansible-lint/compare/v6.14.3...v6.14.6)

---
updated-dependencies:
- dependency-name: ansible-lint
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-14 23:34:03 -05:00
Sam Schmit-Van Werweke
0d6d43e7ca Bump k3s version to v1.24.12+k3s1 (#269) 2023-04-02 21:31:20 -05:00
dependabot[bot]
c0952288c2 chore(deps): bump ansible-core from 2.14.3 to 2.14.4 (#265)
Bumps [ansible-core](https://github.com/ansible/ansible) from 2.14.3 to 2.14.4.
- [Release notes](https://github.com/ansible/ansible/releases)
- [Commits](https://github.com/ansible/ansible/compare/v2.14.3...v2.14.4)

---
updated-dependencies:
- dependency-name: ansible-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-28 15:07:16 -05:00
dependabot[bot]
1c9796e98b chore(deps): bump ansible-lint from 6.14.2 to 6.14.3 (#264)
Bumps [ansible-lint](https://github.com/ansible/ansible-lint) from 6.14.2 to 6.14.3.
- [Release notes](https://github.com/ansible/ansible-lint/releases)
- [Commits](https://github.com/ansible/ansible-lint/compare/v6.14.2...v6.14.3)

---
updated-dependencies:
- dependency-name: ansible-lint
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-27 12:18:52 -05:00
ThePCGeek
288c4089e0 Pc geek fix proxmox lxc (#263)
* (fix): correct var

The variable registered for the rc.local check is rcfile, but the when: condition referenced rclocal, which was undefined; changed it to rcfile to correct this.

* add vars file for proxmox host group

* remove remote_user from site.yml for proxmox

* added newline to fix lint issue

* fix added ---

---------

Co-authored-by: ThePCGeek <thepcgeek1776@gmail.com>
2023-03-25 22:02:59 -05:00
ThePCGeek
49f0a2ce6b (fix): correct var (#262)
The variable registered for the rc.local check is rcfile, but the when: condition referenced rclocal, which was undefined; changed it to rcfile to correct this.
2023-03-25 20:41:04 -05:00
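
A sketch of the class of bug being fixed: the register: name and the when: reference must match, otherwise the condition tests an undefined variable (illustrative tasks, not the repo's exact lines):

    - name: Check if rc.local exists
      ansible.builtin.stat:
        path: /etc/rc.local
      register: rcfile

    - name: Patch rc.local only when it exists
      ansible.builtin.lineinfile:
        path: /etc/rc.local
        line: "# managed by k3s-ansible"   # placeholder line
      when: rcfile.stat.exists             # previously referenced the undefined 'rclocal'
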
dependabot[bot]
6c4621bd56 chore(deps): bump yamllint from 1.29.0 to 1.30.0 (#261)
Bumps [yamllint](https://github.com/adrienverge/yamllint) from 1.29.0 to 1.30.0.
- [Release notes](https://github.com/adrienverge/yamllint/releases)
- [Changelog](https://github.com/adrienverge/yamllint/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/adrienverge/yamllint/compare/v1.29.0...v1.30.0)

---
updated-dependencies:
- dependency-name: yamllint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-24 02:46:16 +00:00
58 changed files with 473 additions and 197 deletions


@@ -10,6 +10,12 @@ on:
 jobs:
   lint:
     uses: ./.github/workflows/lint.yml
-  test:
-    uses: ./.github/workflows/test.yml
-    needs: [lint]
+  test-default:
+    uses: ./.github/workflows/test-default.yml
+    needs: [lint]
+  test-ipv6:
+    uses: ./.github/workflows/test-ipv6.yml
+    needs: [lint, test-default]
+  test-single-node:
+    uses: ./.github/workflows/test-single-node.yml
+    needs: [lint, test-default, test-ipv6]


@@ -5,24 +5,24 @@ on:
jobs: jobs:
pre-commit-ci: pre-commit-ci:
name: Pre-Commit name: Pre-Commit
runs-on: ubuntu-latest runs-on: ubuntu-22.04
env: env:
PYTHON_VERSION: "3.10" PYTHON_VERSION: "3.11"
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }} - name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3 uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
- name: Cache pip - name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11 uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with: with:
path: ~/.cache/pip path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }} key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
@@ -30,7 +30,7 @@ jobs:
${{ runner.os }}-pip- ${{ runner.os }}-pip-
- name: Cache Ansible - name: Cache Ansible
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11 uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with: with:
path: ~/.ansible/collections path: ~/.ansible/collections
key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }} key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
@@ -59,9 +59,9 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
- name: Ensure SHA pinned actions - name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1 uses: zgosalvez/github-actions-ensure-sha-pinned-actions@f32435541e24cd6a4700a7f52bb2ec59e80603b1 # 2.0.1
with: with:
allowlist: | allowlist: |
aws-actions/ aws-actions/

.github/workflows/test-default.yml (new file, 80 additions)

@@ -0,0 +1,80 @@
---
name: Molecule Default
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- default
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

.github/workflows/test-ipv6.yml (new file, 80 additions)

@@ -0,0 +1,80 @@
---
name: Molecule IPv6
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- ipv6
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force


@@ -1,24 +1,21 @@
--- ---
name: Test name: Molecule Single Node
on: on:
workflow_call: workflow_call:
jobs: jobs:
molecule: molecule:
name: Molecule name: Molecule
runs-on: macos-12 runs-on: macos-13
strategy: strategy:
matrix: matrix:
scenario: scenario:
- default
- ipv6
- single_node - single_node
fail-fast: false fail-fast: false
env: env:
PYTHON_VERSION: "3.10" PYTHON_VERSION: "3.11"
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
@@ -31,22 +28,13 @@ jobs:
EOF EOF
- name: Cache pip - name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11 uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with: with:
path: ~/.cache/pip path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }} key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: | restore-keys: |
${{ runner.os }}-pip- ${{ runner.os }}-pip-
- name: Cache Vagrant boxes
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios - name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key. # To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be # On the other hand, this means that the cache contents should be
@@ -54,7 +42,7 @@ jobs:
run: ./.github/download-boxes.sh run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }} - name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3 uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
@@ -81,7 +69,7 @@ jobs:
- name: Upload log files - name: Upload log files
if: always() # do this even if a step before has failed if: always() # do this even if a step before has failed
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1 uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with: with:
name: logs name: logs
path: | path: |


@@ -1,7 +1,7 @@
--- ---
repos: repos:
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0 # v4.3.0 rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
hooks: hooks:
- id: requirements-txt-fixer - id: requirements-txt-fixer
- id: sort-simple-yaml - id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
- id: trailing-whitespace - id: trailing-whitespace
args: [--markdown-linebreak-ext=md] args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git - repo: https://github.com/adrienverge/yamllint.git
rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2 # v1.28.0 rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
hooks: hooks:
- id: yamllint - id: yamllint
args: [-c=.yamllint] args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git - repo: https://github.com/ansible-community/ansible-lint.git
rev: a058554b9bcf88f12ad09ab9fb93b267a214368f # v6.8.6 rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
hooks: hooks:
- id: ansible-lint - id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py - repo: https://github.com/shellcheck-py/shellcheck-py
rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a # v0.8.0.4 rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
hooks: hooks:
- id: shellcheck - id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks - repo: https://github.com/Lucas-C/pre-commit-hooks
rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9 # v1.3.1 rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks: hooks:
- id: remove-crlf - id: remove-crlf
- id: remove-tabs - id: remove-tabs
- repo: https://github.com/sirosen/texthooks - repo: https://github.com/sirosen/texthooks
rev: 30d9af95631de0d7cff4e282bde9160d38bb0359 # 0.4.0 rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks: hooks:
- id: fix-smartquotes - id: fix-smartquotes


@@ -4,11 +4,11 @@
This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`. This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`.
This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.chipzoller.dev/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`. This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.io/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
If you want more context on how this works, see: If you want more context on how this works, see:
📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands) 📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)
📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM) 📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
@@ -28,7 +28,7 @@ on processor architecture:
## ✅ System requirements ## ✅ System requirements
- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/). - Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://technotim.live/posts/ansible-automation/).
- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗) - You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
@@ -101,7 +101,7 @@ scp debian@master_ip:~/.kube/config ~/.kube/config
### 🔨 Testing your cluster ### 🔨 Testing your cluster
See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster). See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
### Troubleshooting ### Troubleshooting


@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.24.11+k3s1
+k3s_version: v1.25.12+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
@@ -41,7 +41,7 @@ extra_agent_args: >-
   {{ extra_args }}
 # image tag for kube-vip
-kube_vip_tag_version: "v0.5.11"
+kube_vip_tag_version: "v0.5.12"
 # metallb type frr or native
 metal_lb_type: "native"
@@ -55,7 +55,6 @@ metal_lb_mode: "layer2"
 # metal_lb_bgp_peer_address: "192.168.30.1"
 # image tag for metal lb
-metal_lb_frr_tag_version: "v7.5.1"
 metal_lb_speaker_tag_version: "v0.13.9"
 metal_lb_controller_tag_version: "v0.13.9"
@@ -82,3 +81,49 @@ proxmox_lxc_ct_ids:
   - 202
   - 203
   - 204
# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
# (harbor / nexus / docker's official registry / etc).
# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
# or air-gapped environments where your nodes don't have internet access after the initial setup
# (which is still needed for downloading the k3s binary and such).
# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
custom_registries: false
# The registries can be authenticated or anonymous, depending on your registry server configuration.
# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
# configs:
# "registry.domain.com":
# auth:
# username: yourusername
# password: yourpassword
# The following is an example that pulls all images used in this playbook through your private registries.
# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
# in your deployments.
# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
# you can just remove those from the mirrors: section.
custom_registries_yaml: |
mirrors:
docker.io:
endpoint:
- "https://registry.domain.com/v2/dockerhub"
quay.io:
endpoint:
- "https://registry.domain.com/v2/quayio"
ghcr.io:
endpoint:
- "https://registry.domain.com/v2/ghcrio"
registry.domain.com:
endpoint:
- "https://registry.domain.com"
configs:
"registry.domain.com":
auth:
username: yourusername
password: yourpassword
# Only enable and configure these if you access the internet through a proxy
# proxy_env:
# HTTP_PROXY: "http://proxy.domain.local:3128"
# HTTPS_PROXY: "http://proxy.domain.local:3128"
# NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"


@@ -0,0 +1,2 @@
---
ansible_user: '{{ proxmox_lxc_ssh_user }}'


@@ -4,7 +4,8 @@
tasks: tasks:
- name: Override host variables - name: Override host variables
ansible.builtin.set_fact: ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length] # See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1 flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster: # The test VMs might be a bit slow, so we give them more time to join the cluster:


@@ -4,7 +4,8 @@
tasks: tasks:
- name: Override host variables (1/2) - name: Override host variables (1/2)
ansible.builtin.set_fact: ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length] # See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1 flannel_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be # In this scenario, we have multiple interfaces that the VIP could be


@@ -2,4 +2,4 @@
 - name: Verify
   hosts: all
   roles:
-    - verify/from_outside
+    - verify_from_outside


@@ -6,4 +6,4 @@ outside_host: localhost
 testing_namespace: molecule-verify-from-outside
 # The directory in which the example manifests reside
-example_manifests_path: ../../../../example
+example_manifests_path: ../../../example


@@ -34,14 +34,14 @@
- name: Assert that the nginx welcome page is available - name: Assert that the nginx welcome page is available
ansible.builtin.uri: ansible.builtin.uri:
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/ url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
return_content: yes return_content: yes
register: result register: result
failed_when: "'Welcome to nginx!' not in result.content" failed_when: "'Welcome to nginx!' not in result.content"
vars: vars:
ip: >- ip: >-
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }} {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
port: >- port_: >-
{{ nginx_services.resources[0].spec.ports[0].port }} {{ nginx_services.resources[0].spec.ports[0].port }}
# Deactivated linter rules: # Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap


@@ -4,7 +4,8 @@
tasks: tasks:
- name: Override host variables - name: Override host variables
ansible.builtin.set_fact: ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length] # See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1 flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster: # The test VMs might be a bit slow, so we give them more time to join the cluster:

reboot.sh (no content changes; file mode changed from normal to executable)


@@ -1,5 +1,4 @@
 ansible-core>=2.13.5
-ansible-lint>=6.8.6
 jmespath>=1.0.1
 jsonpatch>=1.32
 kubernetes>=25.3.0
@@ -9,4 +8,3 @@ netaddr>=0.8.0
 pre-commit>=2.20.0
 pre-commit-hooks>=1.3.1
 pyyaml>=6.0
-yamllint>=1.28.0


@@ -1,28 +1,21 @@
# #
# This file is autogenerated by pip-compile with python 3.8 # This file is autogenerated by pip-compile with Python 3.11
# To update, run: # by the following command:
# #
# pip-compile requirements.in # pip-compile requirements.in
# #
ansible-compat==3.0.1 ansible-compat==3.0.1
# via molecule # via molecule
ansible-core==2.14.3 ansible-core==2.15.4
# via # via
# -r requirements.in # -r requirements.in
# ansible-compat # ansible-compat
# ansible-lint
ansible-lint==6.14.2
# via -r requirements.in
arrow==1.2.3 arrow==1.2.3
# via jinja2-time # via jinja2-time
attrs==22.1.0 attrs==22.1.0
# via jsonschema # via jsonschema
binaryornot==0.4.4 binaryornot==0.4.4
# via cookiecutter # via cookiecutter
black==22.10.0
# via ansible-lint
bracex==2.3.post1
# via wcmatch
cachetools==5.2.0 cachetools==5.2.0
# via google-auth # via google-auth
certifi==2022.9.24 certifi==2022.9.24
@@ -39,7 +32,6 @@ charset-normalizer==2.1.1
# via requests # via requests
click==8.1.3 click==8.1.3
# via # via
# black
# click-help-colors # click-help-colors
# cookiecutter # cookiecutter
# molecule # molecule
@@ -58,9 +50,7 @@ distro==1.8.0
enrich==1.2.7 enrich==1.2.7
# via molecule # via molecule
filelock==3.8.0 filelock==3.8.0
# via # via virtualenv
# ansible-lint
# virtualenv
google-auth==2.14.0 google-auth==2.14.0
# via kubernetes # via kubernetes
identify==2.5.8 identify==2.5.8
@@ -78,14 +68,13 @@ jinja2-time==0.2.0
# via cookiecutter # via cookiecutter
jmespath==1.0.1 jmespath==1.0.1
# via -r requirements.in # via -r requirements.in
jsonpatch==1.32 jsonpatch==1.33
# via -r requirements.in # via -r requirements.in
jsonpointer==2.3 jsonpointer==2.3
# via jsonpatch # via jsonpatch
jsonschema==4.17.0 jsonschema==4.17.0
# via # via
# ansible-compat # ansible-compat
# ansible-lint
# molecule # molecule
kubernetes==25.3.0 kubernetes==25.3.0
# via -r requirements.in # via -r requirements.in
@@ -97,9 +86,7 @@ molecule==4.0.4
# molecule-vagrant # molecule-vagrant
molecule-vagrant==1.0.0 molecule-vagrant==1.0.0
# via -r requirements.in # via -r requirements.in
mypy-extensions==0.4.3 netaddr==0.9.0
# via black
netaddr==0.8.0
# via -r requirements.in # via -r requirements.in
nodeenv==1.7.0 nodeenv==1.7.0
# via pre-commit # via pre-commit
@@ -109,21 +96,14 @@ packaging==21.3
# via # via
# ansible-compat # ansible-compat
# ansible-core # ansible-core
# ansible-lint
# molecule # molecule
pathspec==0.10.1
# via
# black
# yamllint
platformdirs==2.5.2 platformdirs==2.5.2
# via # via virtualenv
# black
# virtualenv
pluggy==1.0.0 pluggy==1.0.0
# via molecule # via molecule
pre-commit==2.21.0 pre-commit==2.21.0
# via -r requirements.in # via -r requirements.in
pre-commit-hooks==4.4.0 pre-commit-hooks==4.5.0
# via -r requirements.in # via -r requirements.in
pyasn1==0.4.8 pyasn1==0.4.8
# via # via
@@ -147,18 +127,16 @@ python-slugify==6.1.2
# via cookiecutter # via cookiecutter
python-vagrant==1.0.0 python-vagrant==1.0.0
# via molecule-vagrant # via molecule-vagrant
pyyaml==6.0 pyyaml==6.0.1
# via # via
# -r requirements.in # -r requirements.in
# ansible-compat # ansible-compat
# ansible-core # ansible-core
# ansible-lint
# cookiecutter # cookiecutter
# kubernetes # kubernetes
# molecule # molecule
# molecule-vagrant # molecule-vagrant
# pre-commit # pre-commit
# yamllint
requests==2.28.1 requests==2.28.1
# via # via
# cookiecutter # cookiecutter
@@ -170,15 +148,12 @@ resolvelib==0.8.1
# via ansible-core # via ansible-core
rich==12.6.0 rich==12.6.0
# via # via
# ansible-lint
# enrich # enrich
# molecule # molecule
rsa==4.9 rsa==4.9
# via google-auth # via google-auth
ruamel-yaml==0.17.21 ruamel-yaml==0.17.21
# via # via pre-commit-hooks
# ansible-lint
# pre-commit-hooks
selinux==0.2.1 selinux==0.2.1
# via molecule-vagrant # via molecule-vagrant
six==1.16.0 six==1.16.0
@@ -187,9 +162,7 @@ six==1.16.0
# kubernetes # kubernetes
# python-dateutil # python-dateutil
subprocess-tee==0.4.1 subprocess-tee==0.4.1
# via # via ansible-compat
# ansible-compat
# ansible-lint
text-unidecode==1.3 text-unidecode==1.3
# via python-slugify # via python-slugify
urllib3==1.26.12 urllib3==1.26.12
@@ -198,14 +171,8 @@ urllib3==1.26.12
# requests # requests
virtualenv==20.16.6 virtualenv==20.16.6
# via pre-commit # via pre-commit
wcmatch==8.4.1
# via ansible-lint
websocket-client==1.4.2 websocket-client==1.4.2
# via kubernetes # via kubernetes
yamllint==1.29.0
# via
# -r requirements.in
# ansible-lint
# The following packages are considered to be unsafe in a requirements file: # The following packages are considered to be unsafe in a requirements file:
# setuptools # setuptools


@@ -1,6 +1,6 @@
 ---
-- hosts: k3s_cluster
+- name: Reset k3s cluster
+  hosts: k3s_cluster
   gather_facts: yes
   roles:
     - role: reset
@@ -14,7 +14,8 @@
       reboot:
         reboot_timeout: 3600
-- hosts: proxmox
+- name: Revert changes to Proxmox cluster
+  hosts: proxmox
   gather_facts: true
   become: yes
   remote_user: "{{ proxmox_lxc_ssh_user }}"


@@ -1,16 +0,0 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null
server_init_args: >-
{% if groups['master'] | length > 1 %}
{% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}


@@ -0,0 +1,3 @@
---
# Name of the master group
group_name_master: master


@@ -0,0 +1,18 @@
---
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
mode: '0755'
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'


@@ -1,5 +1,9 @@
 ---
+- name: Deploy K3s http_proxy conf
+  include_tasks: http_proxy.yml
+  when: proxy_env is defined
+
 - name: Copy K3s service file
   template:
     src: "k3s.service.j2"


@@ -0,0 +1,4 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}


@@ -7,7 +7,7 @@ After=network-online.target
Type=notify Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }} ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process KillMode=process
Delegate=yes Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead # Having non-zero Limit*s causes performance problems due to accounting overhead


@@ -0,0 +1,6 @@
---
# Indicates whether custom registries for k3s should be configured
# Possible values:
# - present
# - absent
state: present


@@ -0,0 +1,17 @@
---
- name: Create directory /etc/rancher/k3s
file:
path: "/etc/{{ item }}"
state: directory
mode: '0755'
loop:
- rancher
- rancher/k3s
- name: Insert registries into /etc/rancher/k3s/registries.yaml
blockinfile:
path: /etc/rancher/k3s/registries.yaml
block: "{{ custom_registries_yaml }}"
mode: '0600'
create: true


@@ -0,0 +1,20 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null
# Name of the master group
group_name_master: master
# yamllint disable rule:line-length
server_init_args: >-
{% if groups[group_name_master | default('master')] | length > 1 %}
{% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}


@@ -0,0 +1,18 @@
---
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
mode: '0755'
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'


@@ -1,23 +1,27 @@
--- ---
- name: Clean previous runs of k3s-init - name: Stop k3s-init
systemd: systemd:
name: k3s-init name: k3s-init
state: stopped state: stopped
failed_when: false failed_when: false
- name: Clean previous runs of k3s-init - name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command.
command: systemctl reset-failed k3s-init command: systemctl reset-failed k3s-init
failed_when: false failed_when: false
changed_when: false changed_when: false
args:
warn: false # The ansible systemd module does not support reset-failed - name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml
when: proxy_env is defined
- name: Deploy vip manifest - name: Deploy vip manifest
include_tasks: vip.yml include_tasks: vip.yml
- name: Deploy metallb manifest - name: Deploy metallb manifest
include_tasks: metallb.yml include_tasks: metallb.yml
tags: metallb
- name: Init cluster inside the transient k3s-init service - name: Init cluster inside the transient k3s-init service
command: command:
@@ -28,12 +32,13 @@
creates: "{{ systemd_dir }}/k3s.service" creates: "{{ systemd_dir }}/k3s.service"
- name: Verification - name: Verification
when: not ansible_check_mode
block: block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails) - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
command: command:
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}" cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
register: nodes register: nodes
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length) until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
retries: "{{ retry_count | default(20) }}" retries: "{{ retry_count | default(20) }}"
delay: 10 delay: 10
changed_when: false changed_when: false
@@ -49,7 +54,6 @@
name: k3s-init name: k3s-init
state: stopped state: stopped
failed_when: false failed_when: false
when: not ansible_check_mode
- name: Copy K3s service file - name: Copy K3s service file
register: k3s_service register: k3s_service


@@ -6,16 +6,16 @@
owner: root owner: root
group: root group: root
mode: 0644 mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}" - name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml" # noqa yaml[line-length] url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml" dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root owner: root
group: root group: root
mode: 0644 mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Set image versions in manifest for metallb-{{ metal_lb_type }} - name: Set image versions in manifest for metallb-{{ metal_lb_type }}
ansible.builtin.replace: ansible.builtin.replace:
@@ -27,4 +27,4 @@
to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}" to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
loop_control: loop_control:
label: "{{ item.change }} => {{ item.to }}" label: "{{ item.change }} => {{ item.to }}"
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']


@@ -6,7 +6,7 @@
owner: root owner: root
group: root group: root
mode: 0644 mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip rbac manifest to first master - name: Download vip rbac manifest to first master
ansible.builtin.get_url: ansible.builtin.get_url:
@@ -15,7 +15,7 @@
owner: root owner: root
group: root group: root
mode: 0644 mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy vip manifest to first master - name: Copy vip manifest to first master
template: template:
@@ -24,4 +24,4 @@
owner: root owner: root
group: root group: root
mode: 0644 mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']


@@ -0,0 +1,4 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}


@@ -1,3 +1,6 @@
 ---
 # Timeout to wait for MetalLB services to come up
 metal_lb_available_timeout: 120s
+
+# Name of the master group
+group_name_master: master


@@ -1,6 +1,7 @@
 ---
 - name: Deploy metallb pool
   include_tasks: metallb.yml
+  tags: metallb
 
 - name: Remove tmp directory used for manifests
   file:


@@ -5,7 +5,7 @@
state: directory state: directory
owner: "{{ ansible_user_id }}" owner: "{{ ansible_user_id }}"
mode: 0755 mode: 0755
with_items: "{{ groups['master'] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Copy metallb CRs manifest to first master - name: Copy metallb CRs manifest to first master
@@ -14,14 +14,14 @@
dest: "/tmp/k3s/metallb-crs.yaml" dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user_id }}" owner: "{{ ansible_user_id }}"
mode: 0755 mode: 0755
with_items: "{{ groups['master'] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Test metallb-system namespace - name: Test metallb-system namespace
command: >- command: >-
k3s kubectl -n metallb-system k3s kubectl -n metallb-system
changed_when: false changed_when: false
with_items: "{{ groups['master'] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Wait for MetalLB resources - name: Wait for MetalLB resources
@@ -66,7 +66,7 @@
command: >- command: >-
k3s kubectl -n metallb-system get endpoints webhook-service k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false changed_when: false
with_items: "{{ groups['master'] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Apply metallb CRs - name: Apply metallb CRs


@@ -1,4 +1,5 @@
 ---
-- name: reboot server
+- name: Reboot server
   become: true
   reboot:
+  listen: reboot server


@@ -0,0 +1,4 @@
---
secure_path:
RedHat: '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
Suse: '/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin'


@@ -1,34 +1,37 @@
---
- name: Set same timezone on every Server
-timezone:
+community.general.timezone:
name: "{{ system_timezone }}"
when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
- name: Set SELinux to disabled state
-selinux:
+ansible.posix.selinux:
state: disabled
when: ansible_os_family == "RedHat"
- name: Enable IPv4 forwarding
-sysctl:
+ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
+tags: sysctl
- name: Enable IPv6 forwarding
-sysctl:
+ansible.posix.sysctl:
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
+tags: sysctl
- name: Enable IPv6 router advertisements
-sysctl:
+ansible.posix.sysctl:
name: net.ipv6.conf.all.accept_ra
value: "2"
state: present
reload: yes
+tags: sysctl
- name: Add br_netfilter to /etc/modules-load.d/
copy:
@@ -38,13 +41,13 @@
when: ansible_os_family == "RedHat"
- name: Load br_netfilter
-modprobe:
+community.general.modprobe:
name: br_netfilter
state: present
when: ansible_os_family == "RedHat"
- name: Set bridge-nf-call-iptables (just to be sure)
-sysctl:
+ansible.posix.sysctl:
name: "{{ item }}"
value: "1"
state: present
@@ -53,13 +56,14 @@
loop:
- net.bridge.bridge-nf-call-iptables
- net.bridge.bridge-nf-call-ip6tables
+tags: sysctl
- name: Add /usr/local/bin to sudo secure_path
lineinfile:
-line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
+line: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'
regexp: "Defaults(\\s)*secure_path(\\s)*="
state: present
insertafter: EOF
path: /etc/sudoers
validate: 'visudo -cf %s'
-when: ansible_os_family == "RedHat"
+when: ansible_os_family in [ "RedHat", "Suse" ]

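The switch to fully qualified collection names (community.general.*, ansible.posix.*) assumes those collections are available on the control node; a hedged sketch of a requirements file and install command (file name and layout are illustrative, not taken from the diff):

# collections/requirements.yml (illustrative)
collections:
  - name: community.general
  - name: ansible.posix

# install them with:
#   ansible-galaxy collection install -r collections/requirements.yml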
View File

@@ -1,5 +1,13 @@
---
-- name: reboot containers
-command:
-"pct reboot {{ item }}"
-loop: "{{ proxmox_lxc_filtered_ids }}"
+- name: Reboot containers
+block:
+- name: Get container ids from filtered files
+set_fact:
+proxmox_lxc_filtered_ids: >-
+{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
+listen: reboot containers
+- name: Reboot container
+command: "pct reboot {{ item }}"
+loop: "{{ proxmox_lxc_filtered_ids }}"
+changed_when: true
+listen: reboot containers

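Both handler tasks listen on the same topic, so a single notification first recomputes the container ids and then reboots them, in the order they are defined. A minimal sketch (hypothetical task, reusing the role's own variables) of notifying the chain:

# hypothetical task in the proxmox_lxc role: one notify fires both handler tasks above
- name: Adjust a container setting (illustration only)
  lineinfile:
    dest: "{{ item }}"
    line: "features: nesting=1"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers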
View File

@@ -1,21 +1,15 @@
---
-- name: check for container files that exist on this host
+- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
-- name: filter out files that do not exist
+- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
-# used for the reboot handler
-- name: get container ids from filtered files
-set_fact:
-proxmox_lxc_filtered_ids:
-'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
lineinfile:

View File

@@ -1,3 +1,4 @@
---
-- name: reboot
+- name: Reboot
reboot:
+listen: reboot

View File

@@ -47,20 +47,16 @@
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
-- name: execute OS related tasks on the Raspberry Pi - {{ action }}
+- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
with_first_found:
-- "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
-- "{{ action }}/{{ detected_distribution }}.yml"
-- "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
-- "{{ action }}/{{ ansible_distribution }}.yml"
-- "{{ action }}/default.yml"
+- "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
+- "{{ action_ }}/{{ detected_distribution }}.yml"
+- "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
+- "{{ action_ }}/{{ ansible_distribution }}.yml"
+- "{{ action_ }}/default.yml"
vars:
-action: >-
-{% if state == "present" -%}
-setup
-{%- else -%}
-teardown
-{%- endif %}
+action_: >-
+{% if state == "present" %}setup{% else %}teardown{% endif %}
when:
- raspberry_pi|default(false)

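The rename appears motivated by `action` being a reserved name in Ansible (it clashes with the task-level action keyword and triggers warnings when used as a variable); the inlined Jinja keeps the same result as the old block form. A small sketch of the rendered value (debug task added here only for illustration):

# state: present -> "setup", anything else -> "teardown"; with_first_found
# then searches that directory and falls back to <action_>/default.yml
- name: Show which task directory will be used (illustration only)
  debug:
    msg: "{{ 'setup' if state == 'present' else 'teardown' }}"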
View File

@@ -8,20 +8,22 @@
notify: reboot
- name: Install iptables
-apt: name=iptables state=present
+apt:
+name: iptables
+state: present
- name: Flush iptables before changing to iptables-legacy
iptables:
flush: true
- name: Changing to iptables-legacy
-alternatives:
+community.general.alternatives:
path: /usr/sbin/iptables-legacy
name: iptables
register: ip4_legacy
- name: Changing to ip6tables-legacy
-alternatives:
+community.general.alternatives:
path: /usr/sbin/ip6tables-legacy
name: ip6tables
register: ip6_legacy

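For reference, the community.general.alternatives tasks are the declarative equivalent of running update-alternatives --set iptables /usr/sbin/iptables-legacy (and the ip6tables counterpart) on the target. A hedged verification sketch, added here only for illustration:

# illustration only: confirm which iptables backend is active after the switch
- name: Check active iptables alternative (illustration only)
  command: update-alternatives --display iptables
  register: iptables_alternative
  changed_when: false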
View File

@@ -46,6 +46,15 @@
- /var/lib/rancher/
- /var/lib/cni/
+- name: Remove K3s http_proxy files
+file:
+name: "{{ item }}"
+state: absent
+with_items:
+- "{{ systemd_dir }}/k3s.service.d"
+- "{{ systemd_dir }}/k3s-node.service.d"
+when: proxy_env is defined
- name: Reload daemon_reload
systemd:
daemon_reload: yes
@@ -67,18 +76,18 @@
content: "{{ lookup('template', 'templates/rc.local.j2') }}"
create: false
state: absent
-when: proxmox_lxc_configure and rclocal.stat.exists
+when: proxmox_lxc_configure and rcfile.stat.exists
- name: Check rc.local for cleanup
become: true
slurp:
src: /etc/rc.local
register: rcslurp
-when: proxmox_lxc_configure and rclocal.stat.exists
+when: proxmox_lxc_configure and rcfile.stat.exists
- name: Cleanup rc.local if we only have a Shebang line
become: true
file:
path: /etc/rc.local
state: absent
-when: proxmox_lxc_configure and rclocal.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1
+when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1

View File

@@ -9,7 +9,7 @@
check_mode: false
- name: Umount filesystem
-mount:
+ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items:

View File

@@ -1,5 +0,0 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"

View File

@@ -0,0 +1 @@
../../proxmox_lxc/handlers/main.yml

View File

@@ -1,21 +1,15 @@
---
-- name: check for container files that exist on this host
+- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
-- name: filter out files that do not exist
+- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
-# used for the reboot handler
-- name: get container ids from filtered files
-set_fact:
-proxmox_lxc_filtered_ids:
-'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
- name: Remove LXC apparmor profile
lineinfile:
dest: "{{ item }}"

View File

@@ -1,15 +1,17 @@
---
+- name: Prepare Proxmox cluster
-- hosts: proxmox
+hosts: proxmox
gather_facts: true
become: yes
-remote_user: "{{ proxmox_lxc_ssh_user }}"
+environment: "{{ proxy_env | default({}) }}"
roles:
- role: proxmox_lxc
when: proxmox_lxc_configure
-- hosts: k3s_cluster
+- name: Prepare k3s nodes
+hosts: k3s_cluster
gather_facts: yes
+environment: "{{ proxy_env | default({}) }}"
roles:
- role: lxc
become: true
@@ -20,18 +22,27 @@
become: true
- role: raspberrypi
become: true
+- role: k3s_custom_registries
+become: true
+when: custom_registries
-- hosts: master
+- name: Setup k3s servers
+hosts: master
+environment: "{{ proxy_env | default({}) }}"
roles:
-- role: k3s/master
+- role: k3s_server
become: true
-- hosts: node
+- name: Setup k3s agents
+hosts: node
+environment: "{{ proxy_env | default({}) }}"
roles:
-- role: k3s/node
+- role: k3s_agent
become: true
-- hosts: master
+- name: Configure k3s cluster
+hosts: master
+environment: "{{ proxy_env | default({}) }}"
roles:
-- role: k3s/post
+- role: k3s_server_post
become: true
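The play-level environment: "{{ proxy_env | default({}) }}" injects the proxy variables into every task of the play and resolves to an empty environment when proxy_env is undefined, so non-proxied setups are unaffected. A hedged usage sketch for the restructured playbook (the inventory path is an assumption, not from the diff):

# named plays make --list-tasks / --list-tags output easier to read
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini --list-tasks
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini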