Compare commits

...

110 Commits

Author SHA1 Message Date
Timothy Stewart
2ae9ee82f0 fix(ci): pin + cache 2022-11-02 21:27:35 -05:00
Timothy Stewart
5380f93b64 fix(ci): pin + cache 2022-11-02 21:20:33 -05:00
Timothy Stewart
a414453ad4 fix(ci): pin + cache 2022-11-02 21:11:31 -05:00
Timothy Stewart
23c5d9ec89 fix(ci): pin + cache 2022-11-02 21:00:37 -05:00
Timothy Stewart
6b339e1985 fix(ci): pin + cache 2022-11-02 20:55:40 -05:00
Timothy Stewart
a031807660 fix(ci): pin + cache 2022-11-02 20:15:03 -05:00
Timothy Stewart
7dd305aabc fix(ci): pin + cache 2022-11-02 20:00:27 -05:00
Timothy Stewart
500931e2fd fix(ci): pin + cache 2022-11-02 19:55:06 -05:00
Timothy Stewart
cf357cf164 fix(ci): pin + cache 2022-11-02 19:49:32 -05:00
Timothy Stewart
215e0d10ed fix(ci): pin + cache 2022-11-02 19:49:03 -05:00
Timothy Stewart
c6ed680dc1 fix(ci): pin + cache 2022-11-02 19:41:30 -05:00
Timothy Stewart
8343a6199e fix(ci): pin + cache 2022-11-02 19:40:11 -05:00
Timothy Stewart
b524f97552 fix(ci): pin + cache 2022-11-02 19:38:39 -05:00
Timothy Stewart
f741040e44 fix(ci): pin + cache 2022-11-02 19:32:26 -05:00
Timothy Stewart
09bc628ba6 fix(ci): pin + cache 2022-11-01 22:55:42 -05:00
Timothy Stewart
71ff6b86cd fix(ci): pin + cache 2022-11-01 22:43:56 -05:00
Timothy Stewart
23729ddbbe fix(ci): pin + cache 2022-11-01 22:35:39 -05:00
Timothy Stewart
e254c407f0 fix(ci): pin + cache 2022-11-01 22:18:39 -05:00
Timothy Stewart
713b4694e1 fix(ci): pin + cache 2022-11-01 22:05:37 -05:00
Timothy Stewart
952d513124 fix(ci): pin + cache 2022-11-01 21:45:47 -05:00
Timothy Stewart
dd1e596332 fix(ci): pin + cache 2022-11-01 21:43:00 -05:00
Timothy Stewart
6af47f96d0 fix(ci): pin + cache 2022-11-01 21:24:03 -05:00
Timothy Stewart
664deec6c3 fix(ci): pin + cache 2022-11-01 21:06:21 -05:00
Timothy Stewart
646459e7f5 fix(ci): pin + cache 2022-11-01 21:05:57 -05:00
Timothy Stewart
64242d9729 fix(ci): pin + cache 2022-11-01 21:05:11 -05:00
Timothy Stewart
f4864ddb64 fix(ci): pin + cache 2022-11-01 21:04:23 -05:00
Timothy Stewart
6a83cde0c6 fix(ci): pin + cache 2022-11-01 21:03:27 -05:00
Timothy Stewart
77ac928c0d fix(ci): pin + cache 2022-11-01 21:01:47 -05:00
Timothy Stewart
8300a7aaac fix(ci): pin + cache 2022-11-01 21:01:14 -05:00
Timothy Stewart
bdc6af5f46 fix(ci): pin + cache 2022-11-01 20:47:50 -05:00
Timothy Stewart
dc8276157a fix(ci): pin + cache 2022-11-01 20:37:23 -05:00
Timothy Stewart
37f0cb11d2 fix(ci): pin + cache 2022-11-01 20:35:46 -05:00
Timothy Stewart
68e7c77b22 fix(ci): pin + cache 2022-11-01 20:26:13 -05:00
Timothy Stewart
d82c4feac8 feat(gh-actions-controller): added 2022-11-01 20:22:07 -05:00
Timothy Stewart
9217d8607b feat(gh-actions-controller): added 2022-11-01 20:19:00 -05:00
Timothy Stewart
fbc15aa1a1 fix(ci): pin + cache 2022-11-01 20:15:03 -05:00
Timothy Stewart
b55ec046ad fix(ci): pin + cache 2022-11-01 20:07:15 -05:00
Timothy Stewart
b3cc178045 fix(ci): pin + cache 2022-11-01 19:59:22 -05:00
Timothy Stewart
13be424187 fix(ci): pin + cache 2022-11-01 19:55:33 -05:00
Timothy Stewart
d9cecd5364 fix(ci): pin + cache 2022-11-01 19:51:32 -05:00
Timothy Stewart
afb96dbee2 fix(ci): pin + cache 2022-11-01 19:48:31 -05:00
Timothy Stewart
30ffc69192 fix(ci): pin + cache 2022-11-01 19:41:44 -05:00
Timothy Stewart
94e385c28e fix(ci): pin + cache 2022-11-01 19:40:28 -05:00
Timothy Stewart
dbb2cda17a fix(ci): pin + cache 2022-10-31 22:10:31 -05:00
Timothy Stewart
d24cdb97db feat(gh-actions-controller): added 2022-10-31 22:09:33 -05:00
Timothy Stewart
5bebec930b feat(gh-actions-controller): added 2022-10-31 22:02:16 -05:00
Timothy Stewart
ac52acdec1 feat(gh-actions-controller): added 2022-10-31 22:01:39 -05:00
Timothy Stewart
105b2c2f1e fix(ci): pin + cache 2022-10-31 21:55:51 -05:00
Timothy Stewart
d20f485fca fix(ci): pin + cache 2022-10-31 21:47:33 -05:00
Timothy Stewart
f9bb9dabae fix(ci): pin + cache 2022-10-31 21:45:11 -05:00
Timothy Stewart
6f15ef260e fix(ci): pin + cache 2022-10-31 21:40:25 -05:00
Timothy Stewart
de1966fe02 fix(ci): pin + cache 2022-10-31 21:33:47 -05:00
Timothy Stewart
fc823122d8 fix(script): convert to linux 2022-10-31 21:29:24 -05:00
Techno Tim
2f8d94bb5e Merge branch 'master' into self-hosted-runners 2022-10-31 18:52:22 -05:00
Techno Tim
5268ef305a Revert "feat(ci): switching to self-hosted runners (#133)" (#135)
This reverts commit a840571733.
2022-10-31 18:50:34 -05:00
Techno Tim
a840571733 feat(ci): switching to self-hosted runners (#133)
* feat(ci): switching to self-hosted runners

* feat(gh-actions-controller): added

* feat(gh-actions-controller): added
2022-10-31 17:56:22 -05:00
Timothy Stewart
9c3814ce72 feat(gh-actions-controller): added 2022-10-30 22:45:59 -05:00
Timothy Stewart
0e60f4643b feat(gh-actions-controller): added 2022-10-30 22:44:13 -05:00
Timothy Stewart
bb20514a6a feat(ci): switching to self-hosted runners 2022-10-30 20:46:14 -05:00
dependabot[bot]
b1370406ea chore(deps): bump ansible-lint from 6.8.3 to 6.8.4 (#130)
Bumps [ansible-lint](https://github.com/ansible-community/ansible-lint) from 6.8.3 to 6.8.4.
- [Release notes](https://github.com/ansible-community/ansible-lint/releases)
- [Commits](https://github.com/ansible-community/ansible-lint/compare/v6.8.3...v6.8.4)

---
updated-dependencies:
- dependency-name: ansible-lint
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-28 17:16:35 -05:00
dependabot[bot]
12d57a07d0 chore(deps): bump ansible-lint from 6.8.2 to 6.8.3 (#129)
Bumps [ansible-lint](https://github.com/ansible-community/ansible-lint) from 6.8.2 to 6.8.3.
- [Release notes](https://github.com/ansible-community/ansible-lint/releases)
- [Commits](https://github.com/ansible-community/ansible-lint/compare/v6.8.2...v6.8.3)

---
updated-dependencies:
- dependency-name: ansible-lint
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-26 21:55:42 -05:00
samerbahri98
4f3b8ec9e0 Pre-commit hooks (#125)
* feat: pre-commit

* empty

* fix: requirements.txt
2022-10-26 19:15:24 -05:00
dependabot[bot]
45ddd65e74 chore(deps): bump zipp from 3.9.0 to 3.10.0 (#128)
Bumps [zipp](https://github.com/jaraco/zipp) from 3.9.0 to 3.10.0.
- [Release notes](https://github.com/jaraco/zipp/releases)
- [Changelog](https://github.com/jaraco/zipp/blob/main/CHANGES.rst)
- [Commits](https://github.com/jaraco/zipp/compare/v3.9.0...v3.10.0)

---
updated-dependencies:
- dependency-name: zipp
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-26 19:13:53 -05:00
dependabot[bot]
b2a62ea4eb chore(deps): bump ruamel-yaml-clib from 0.2.6 to 0.2.7 (#124)
Bumps [ruamel-yaml-clib](https://sourceforge.net/p/ruamel-yaml-clib/code/ci/default/tree) from 0.2.6 to 0.2.7.

---
updated-dependencies:
- dependency-name: ruamel-yaml-clib
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-22 13:37:11 -05:00
dependabot[bot]
a8697edc99 chore(deps): bump oauthlib from 3.2.1 to 3.2.2 (#123)
Bumps [oauthlib](https://github.com/oauthlib/oauthlib) from 3.2.1 to 3.2.2.
- [Release notes](https://github.com/oauthlib/oauthlib/releases)
- [Changelog](https://github.com/oauthlib/oauthlib/blob/v3.2.2/CHANGELOG.rst)
- [Commits](https://github.com/oauthlib/oauthlib/compare/v3.2.1...v3.2.2)

---
updated-dependencies:
- dependency-name: oauthlib
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-18 19:20:28 -05:00
dependabot[bot]
d3218f5d5c chore(deps): bump google-auth from 2.12.0 to 2.13.0 (#122)
Bumps [google-auth](https://github.com/googleapis/google-auth-library-python) from 2.12.0 to 2.13.0.
- [Release notes](https://github.com/googleapis/google-auth-library-python/releases)
- [Changelog](https://github.com/googleapis/google-auth-library-python/blob/main/CHANGELOG.md)
- [Commits](https://github.com/googleapis/google-auth-library-python/compare/v2.12.0...v2.13.0)

---
updated-dependencies:
- dependency-name: google-auth
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-17 23:06:58 -05:00
Irakli Nadareishvili
590a8029fd Removing accidental tear-down step that is clearly a typo (#117)
Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2022-10-15 14:15:25 -05:00
Techno Tim
cb2fa7c441 k3s, metallb, kube-vip updates (#119)
* feat(k3s): Updated to v1.24.6+k3s1

* feat(kube-vip): Update to v0.5.5

* feat(metal-lb): Update to v0.13.6

* fix(pip): Freeze requirements

* fix(lint): Fixed ansible-lint
2022-10-15 12:23:50 -05:00
ccoane
14508ec8dc Add "collection" to the ansible-galaxy command as it will run without making changes if that collection argument is not provided. (#113) 2022-10-04 20:41:19 -05:00
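As a quick, hedged illustration of what that commit title describes (the requirements file path is the one used elsewhere in this compare; the commands themselves are standard ansible-galaxy usage):

```bash
# Without the "collection" keyword, ansible-galaxy treats the file as a role
# requirements list, so a collections-only file installs nothing.
ansible-galaxy install -r ./collections/requirements.yml

# With the keyword, the collections listed in the file are actually installed.
ansible-galaxy collection install -r ./collections/requirements.yml
```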
Ioannis Angelakopoulos
fb6c9a6866 adds colors to molecule testing in GitHub action (#109) 2022-09-28 03:48:25 +00:00
Simon Leiner
d5d02280c1 Fix download-boxes.sh if no boxes are present (#106)
In case of grep not matching any line, it would return an error code
and thus stop the script. This patch sets "present_boxes" to an empty
value in case any of the commands fail.
2022-09-26 17:21:37 -05:00
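A minimal sketch of the fallback pattern this commit message describes, assuming the same variable names as the `download-boxes.sh` script shown later in this compare:

```bash
#!/bin/bash
set -euo pipefail

# Under "set -e", grep exiting non-zero (no matching lines) would abort the
# whole script, so fall back to an empty list when no boxes are installed yet.
present_boxes=$(
  (vagrant box list | grep "virtualbox" | awk '{print $1;}' | sort | uniq) ||
    echo ""
)
echo "Already present boxes: ${present_boxes:-<none>}"
```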
Simon Leiner
57e528832b Fix role order in reset playbook (#104) 2022-09-25 12:35:36 -05:00
Ioannis Angelakopoulos
cd76fa05a7 fix master taint implementation - linting problems (#95)
* add virtual-ip to certificate SAN entries

Adds the kube-vip IP as a Subject Alternative Name in the TLS cert. It is needed otherwise you cannot access the cluster.

* fixes bug with master taints (#1)

- improves taint logic

* fixes typo

* fixes formatting

* fixes undefined group['node'] if missing from hosts.ini (#2)

* fixes undefined group['node'] if missing from hosts.ini

- improves application of master taint by centralizing code

* improves molecule testing, fixes linting

* hacking at linter problems, small tweaks

- increases the metallb timeout error due to intermittent testing errors in GitHub actions

* improves context by renaming taint variable

- makes variable boolean

* fix bug

* removes linting hacks

Co-authored-by: Ioannis Angelakopoulos <ioangel@gmail.com>
2022-09-24 20:12:24 -05:00
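For reference, the taint this series ends up applying through `extra_server_args` (see the `group_vars/all.yml` diff below) corresponds to the following manual command; the node name here is a hypothetical placeholder:

```bash
# Applied only when worker nodes exist, i.e. when k3s_master_taint is true.
kubectl taint node control1 node-role.kubernetes.io/master=true:NoSchedule
```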
Simon Leiner
d5b37acd8a Drop support for CentOS, test Rocky and Debian in CI (#92)
* Test CentOS 7 in CI

* Drop support for CentOS, test on Rocky and Debian

* Fix reset playbook for Rocky Linux

* Fix typo

* Disable firewalld during testing

Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2022-09-24 05:10:55 +00:00
Simon Leiner
5225493ca0 CI: Fix linting job for ansible-lint 6.6.0 (#96)
* CI: Fix linting job for ansible-lint 6.6.0

* Increase MetalLB timeout to mitigate CI flakiness
2022-09-23 23:28:21 -05:00
BMeach
4acbe91b6c Fix master node taints in multi node installs (#93)
* Taint master nodes if more than one node

* Kick off fork workflow tests

Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2022-09-17 15:56:09 -05:00
Techno Tim
f1c2f3b7dd fix(github): ignore readme updates (#94) 2022-09-17 00:18:56 -05:00
Techno Tim
76718a010c chore(docs): Updated with ansible collections install (#89)
* chore(docs): Fixing thanks section

* chore(docs): Updated with collections command
2022-09-15 02:32:34 +00:00
Simon Leiner
a1ef590442 Add support for API servers on IPv6 addresses (#48)
* Remove duplicate file for deletion

* Add support for IPv6 clusters

To correctly escape IPv6 addresses when ports are used, they must be
wrapped in square brackets [1]. This patch adds support for that,
using Ansible's ipwrap filter [2].

[1]: https://datatracker.ietf.org/doc/html/rfc4038#section-5.1
[2]: http://docs.ansible.com/ansible/latest/collections/ansible/utils/docsite/filters_ipaddr.html#wrapping-ipv6-addresses-in-brackets

* Do not abort other molecule jobs on failure

* Fix cache keys for Vagrant boxes

* Molecule: Derive overrides.yml location from scenario dir

# Conflicts:
#	molecule/default/molecule.yml
#	molecule/ipv6/molecule.yml
2022-09-10 12:57:38 -05:00
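A hedged illustration of the bracket-wrapping issue this commit addresses, using the IPv4 endpoint from the sample inventory and the IPv6 VIP from the ipv6 scenario further down (the URL path is only for demonstration):

```bash
# IPv4: host and port separate without any extra syntax.
curl -k https://192.168.30.222:6443/version

# IPv6: the address must be wrapped in brackets so the port is not read as
# part of the address (RFC 4038); ansible.utils.ipwrap adds these brackets.
curl -k https://[fdad:bad:ba55::333]:6443/version
```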
Simon Leiner
9ff3bb6b87 Test single-node cluster (#78)
* Molecule: Derive overrides.yml location from scenario dir

# Conflicts:
#	molecule/default/molecule.yml
#	molecule/ipv6/molecule.yml

* Molecule: Add single_node scenario

* Fix get_nodes test for the case of empty groups
2022-09-09 11:47:26 -05:00
Techno Tim
b1df9663fa fix(ansible): Fix group permissions on tmp folder (#77) 2022-09-09 03:00:54 +00:00
Vitalij Dovhanyc
58c3a61bbb add editorconfig and fix trailing whitespaces (#68)
Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2022-09-07 20:00:13 -05:00
Simon Leiner
60bc09b085 Mitigate CI flakiness (#70)
* Increase SSH connection timeouts and retries

* Make MetalLB timeouts configurable

* Retry applying MetalLB CRs

* Fix location of MetalLB CRs template

* Make MetalLB wait logic more compact

* Fix typo

* retrigger 1

* retrigger 2

* retrigger 3

* retrigger 4

* retrigger 5
2022-09-07 18:47:58 -05:00
Timothy Stewart
4365a2a54b fix(ansible): fixing permissions on tmp folder 2022-09-06 19:07:09 -05:00
Simon Leiner
a6b2a95b7e Test playbook using molecule (#67)
* Test cluster using molecule

* Fix detection of first control node

* Include --flannel-iface and --node-ip as k3s arguments

* Store logs of k3s-init.service as GitHub job artifacts
2022-09-03 10:36:28 -05:00
Timothy Stewart
3c36dc8bfd fix(ansible): use k3s kubectl 2022-09-02 11:07:17 -05:00
Techno Tim
6695d13683 upgrade k3s to v1.24.4+k3s1 (#64)
* feat(k3s): Upgrade to v1.24.4+k3s1
* feat(metallb): updated to v0.13.5
2022-09-01 21:20:25 -05:00
Techno Tim
74e1dc1dfe Pin GitHub Actions to SHA + Dependabot (#62)
* feat(repo): Add dependabot

* fix(ci): clean up

* fix(gh-actions): pin to sha

* fix(lint): fixing yaml lint

* feat(repo): Add dependabot

* fix(vagrant): up retry count to 60 because gh actions are sloooooow
2022-08-30 23:15:15 -05:00
Techno Tim
56f8f21850 fix(ansible): Install services separate from config (#63) 2022-08-30 21:44:55 -05:00
Timothy Stewart
117c608a73 fix(ansible): added longer wait with todo 2022-08-29 23:16:13 -05:00
niki-on-github
e28d8f38e2 add ansible.posix module to requirements.yml (#59)
Co-authored-by: arch <arch@local>
Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2022-08-29 22:58:57 -05:00
Simon Leiner
9d8a5cc2b8 Execute Vagrant cluster in CI (#57) 2022-08-29 19:45:07 -05:00
Techno Tim
2296959894 fix(ci): Fix Linting (#61) 2022-08-28 20:36:05 -05:00
Timothy Stewart
6d793c5c96 fix(ansible): add wait 2022-08-28 17:49:38 -05:00
Timothy Stewart
47ac514dc6 fix(ansible): fix lint 2022-08-28 16:42:07 -05:00
Timothy Stewart
611cf5ab0b fix(ansible): fix lint 2022-08-28 16:32:52 -05:00
Timothy Stewart
c82cbfc501 fix(ansible): fix lint 2022-08-28 16:29:04 -05:00
Timothy Stewart
f603a048c3 fix(ansible): fix lint 2022-08-28 16:26:46 -05:00
Timothy Stewart
4b959719ba fix(ansible): run task on one master 2022-08-28 16:00:10 -05:00
Timothy Stewart
db8fbd9447 chore(lint): Fix yaml lint 2022-08-28 14:27:22 -05:00
Techno Tim
aa05ab153e fix(ansible): Refactored ansible steps to now install metallb in post… (#58)
* fix(ansible): Refactored ansible steps to now install metallb in post task and verify
2022-08-28 14:25:09 -05:00
Simon Leiner
370e19169b Print fewer logs when removing manifests (#55) 2022-08-23 23:26:08 -05:00
Timothy Stewart
e04f3bac61 chore(github): Updated issue template 2022-08-20 16:22:56 -05:00
Techno Tim
cdd7c4e668 Fix k3s manifest (#53)
* fix(k3s): Remove manifests and folders from bootstrapped cluster
2022-08-20 16:19:20 -05:00
Lance A. Brown
90bbc0a399 Add linux-modules-extra-raspi package for Ubuntu 22.x on Raspberry. (#50)
* Add task for linux-modules-extra-raspi

Ubuntu 22.x on Raspberry Pi needs the linux-modules-extra-raspi package
for the vxlans kernel module.

* Remove linux-modules-extra-reaspi package

Not sure we want to do this but including it in the PR anyway for discussion.
2022-08-11 21:23:56 -05:00
slemmercs
1e4b48f039 replaced --no-deploy with --disable (#49)
According to https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/ > Kubernetes Components section the --disable <value> flag should be used as the --no-deploy is a deprecated option
2022-08-11 21:23:47 -05:00
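The same substitution in command-line form, using the two components this playbook disables (flag names per the linked k3s server documentation):

```bash
# Deprecated spelling that the docs no longer recommend:
k3s server --no-deploy servicelb --no-deploy traefik

# Current spelling, matching extra_server_args in this repo:
k3s server --disable servicelb --disable traefik
```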
Timothy Stewart
ac5325a670 fix(kube-vip): Cleaning up; adding missing rbac api groups 2022-07-30 22:11:28 -05:00
Techno Tim
a33ed487e0 feat(upgrades): Updated k3s, metalls, and kubevip and fixed bugs (#46) 2022-07-27 23:13:43 -05:00
Simon Leiner
1830b9c9a1 Fix .gitignore (#40)
For more details, see:
https://stackoverflow.com/a/20652768
2022-07-27 21:24:59 -05:00
SwaggaRitz
39581f4ba7 Replaced manifest files with double extention to '-' (#41)
Co-authored-by: Adrian Jones <adrian@geektowers.local>
2022-07-27 21:21:38 -05:00
68 changed files with 3083 additions and 661 deletions

View File

@@ -1,3 +1,17 @@
 ---
+exclude_paths:
+  # default paths
+  - '.cache/'
+  - '.github/'
+  - 'test/fixtures/formatting-before/'
+  - 'test/fixtures/formatting-prettier/'
+  # The "converge" and "reset" playbooks use import_playbook in
+  # conjunction with the "env" lookup plugin, which lets the
+  # syntax check of ansible-lint fail.
+  - 'molecule/**/converge.yml'
+  - 'molecule/**/prepare.yml'
+  - 'molecule/**/reset.yml'
 skip_list:
   - 'fqcn-builtins'

13
.editorconfig Normal file
View File

@@ -0,0 +1,13 @@
root = true
[*]
indent_style = space
indent_size = 2
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
end_of_line = lf
max_line_length = off
[Makefile]
indent_style = tab
[*.go]
indent_style = tab

View File

@@ -26,7 +26,7 @@ Operating system:
 Hardware:
-### Variables Used:
+### Variables Used
 `all.yml`
@@ -73,3 +73,5 @@ node
 ## Possible Solution
 <!--- Not obligatory, but suggest a fix/reason for the bug, -->
+- [ ] I've checked the [General Troubleshooting Guide](https://github.com/techno-tim/k3s-ansible/discussions/20)

View File

@@ -12,3 +12,4 @@
 - [ ] Ran `reset.yml` playbook
 - [ ] Did not add any unnecessary changes
 - [ ] 🚀
+- [ ] Ran pre-commit install at least once before committing

11
.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,11 @@
---
version: 2
updates:
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]

42
.github/download-boxes.sh vendored Executable file
View File

@@ -0,0 +1,42 @@
#!/bin/bash
# download-boxes.sh
# Check all molecule.yml files for required Vagrant boxes and download the ones that are not
# already present on the system.
set -euo pipefail
YQ_VERSION=v4.29.2
YQ_BINARY=yq_linux_amd64
GIT_ROOT=$(git rev-parse --show-toplevel)
PROVIDER=virtualbox
# get yq used for filtering
sudo wget https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/${YQ_BINARY} -O /usr/bin/yq &&\
sudo chmod +x /usr/bin/yq
# Read all boxes for all platforms from the "molecule.yml" files
all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
grep --invert-match --regexp=--- | # Filter out file separators
sort |
uniq)
# Read the boxes that are currently present on the system (for the current provider)
present_boxes=$(
(vagrant box list |
grep "${PROVIDER}" | # Filter by boxes available for the current provider
awk '{print $1;}' | # The box name is the first word in each line
sort |
uniq) ||
echo "" # In case any of these commands fails, just use an empty list
)
# The boxes that we need to download are the ones present in $all_boxes, but not $present_boxes.
download_boxes=$(comm -2 -3 <(echo "${all_boxes}") <(echo "${present_boxes}"))
# Actually download the necessary boxes
if [ -n "${download_boxes}" ]; then
echo "${download_boxes}" | while IFS= read -r box; do
vagrant box add --provider "${PROVIDER}" "${box}"
done
fi

View File

@@ -1,31 +1,73 @@
 ---
-name: Lint
-'on':
+name: Linting
+on:
   pull_request:
   push:
     branches:
       - master
+    paths-ignore:
+      - '**/README.md'
 jobs:
-  test:
-    name: Lint
-    runs-on: ubuntu-latest
-    steps:
-      - name: Check out the codebase.
-        uses: actions/checkout@v2
-      - name: Set up Python 3.7.
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.x'
-      - name: Install test dependencies.
-        run: pip3 install yamllint ansible-lint ansible
-      - name: Run yamllint.
-        run: yamllint .
-      - name: Run ansible-lint.
-        run: ansible-lint
+  pre-commit-ci:
+    name: Pre-Commit
+    runs-on: self-hosted
+    env:
+      PYTHON_VERSION: "3.10"
+    steps:
+      - name: Check out the codebase
+        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+      - name: Set up Python ${{ env.PYTHON_VERSION }}
+        uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # 4.3.0
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+          cache: 'pip' # caching pip dependencies
+      - name: Cache pip
+        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+      - name: Cache Ansible
+        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
+        with:
+          path: ~/.ansible/collections
+          key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-ansible-
+      - name: Install dependencies
+        run: |
+          echo "::group::Upgrade pip"
+          python3 -m pip install --upgrade pip
+          echo "::endgroup::"
+          echo "::group::Install Python requirements from requirements.txt"
+          python3 -m pip install -r requirements.txt
+          echo "::endgroup::"
+          echo "::group::Install Ansible role requirements from collections/requirements.yml"
+          ansible-galaxy install -r collections/requirements.yml
+          echo "::endgroup::"
+      - name: Run pre-commit
+        uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
+  ensure-pinned-actions:
+    name: Ensure SHA Pinned Actions
+    runs-on: self-hosted
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
+      - name: Ensure SHA pinned actions
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@6ca5574367befbc9efdb2fa25978084159c5902d # 1.3.0
+        with:
+          allowlist: |
+            aws-actions/
+            docker/login-action

124
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,124 @@
---
name: Test
on:
pull_request:
push:
branches:
- master
paths-ignore:
- '**/README.md'
jobs:
molecule:
name: Molecule
runs-on: self-hosted
strategy:
matrix:
scenario:
- default
- ipv6
- single_node
fail-fast: false
env:
PYTHON_VERSION: "3.10"
VAGRANT_DEFAULT_PROVIDER: virtualbox
steps:
- name: Check out the codebase
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Virtual Box from Oracle
run: |
echo "::group::Virtual Box"
wget -O- https://www.virtualbox.org/download/oracle_vbox_2016.asc | sudo gpg --dearmor --yes --output /usr/share/keyrings/oracle-virtualbox-2016.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/oracle-virtualbox-2016.gpg] https://download.virtualbox.org/virtualbox/debian $(lsb_release -cs) contrib" | sudo tee -a /etc/apt/sources.list.d/virtualbox.list
sudo apt update && sudo apt install -y linux-headers-generic linux-headers-5.15.0-52-generic build-essential dkms virtualbox-dkms virtualbox-6.1
echo "::endgroup::"
echo "::group::Virtual Box Test"
vboxmanage --version
sudo /sbin/vboxconfig
sudo modprobe vboxdrv
vboxmanage --version
echo "::endgroup::"
- name: Install Vagrant
run: |
echo "::group::Install Vagrant"
wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt update && sudo apt install -y vagrant
vagrant version
vagrant plugin list
vagrant plugin install vagrant-vbguest
vagrant plugin list
echo "::endgroup::"
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Vagrant boxes
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # 4.3.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

2
.gitignore vendored
View File

@@ -1 +1 @@
-.vagrant
+.env/

21
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,21 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
- id: detect-private-key
- repo: https://github.com/adrienverge/yamllint.git
rev: v1.28.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: v6.8.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.8.0.4
hooks:
- id: shellcheck
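With this configuration in place, the hooks are typically set up and exercised with the standard pre-commit commands (generic usage, not specific to this repo):

```bash
# Install the git hook once per clone (the PR checklist above asks for this).
pre-commit install

# Optionally run every configured hook against the entire tree, like CI does.
pre-commit run --all-files
```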

View File

@@ -16,9 +16,9 @@ If you want more context on how this works, see:
 Build a Kubernetes cluster using Ansible with k3s. The goal is easily install a HA Kubernetes cluster on machines running:
-- [X] Debian
-- [X] Ubuntu
-- [X] CentOS
+- [x] Debian (tested on version 11)
+- [x] Ubuntu (tested on version 22.04)
+- [x] Rocky (tested on version 9)
 on processor architecture:
@@ -29,8 +29,13 @@ on processor architecture:
 ## ✅ System requirements
 - Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- [`netaddr` package](https://pypi.org/project/netaddr/) must be available to Ansible. If you have installed Ansible via apt, this is already taken care of. If you have installed Ansible via `pip`, make sure to install `netaddr` into the respective virtual environment.
 - `server` and `agent` nodes should have passwordless SSH access, if not you can supply arguments to provide credentials `--ask-pass --ask-become-pass` to each command.
+- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml`
 ## 🚀 Getting Started
 ### 🍴 Preparation
@@ -100,18 +105,16 @@ See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#test
 Be sure to see [this post](https://github.com/techno-tim/k3s-ansible/discussions/20) on how to troubleshoot common problems
-### 🔷 Vagrant
-You may want to kickstart your k3s cluster by using Vagrant to quickly build you all needed VMs with one command.
-Head to the `vagrant` subfolder and type `vagrant up` to get your environment setup.
-After the VMs have got build, deploy k3s using the Ansible playbook `site.yml` by the
-`vagrant provision --provision-with ansible` command.
+### Testing the playbook using molecule
+This playbook includes a [molecule](https://molecule.rtfd.io/)-based test setup.
+It is run automatically in CI, but you can also run the tests locally.
+This might be helpful for quick feedback in a few cases.
+You can find more information about it [here](molecule/README.md).
 ## Thanks 🤝
-This repo is really standing on the shoulders of giants. To all those who have contributed.
-Thanks to these repos for code and ideas:
+This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and tanks to these repos for code and ideas:
 - [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible)
 - [geerlingguy/turing-pi-cluster](https://github.com/geerlingguy/turing-pi-cluster)

View File

@@ -1,3 +1,6 @@
 ---
 collections:
+  - name: ansible.utils
   - name: community.general
+  - name: ansible.posix
+  - name: kubernetes.core

View File

@@ -4,6 +4,7 @@ kind: Service
 metadata:
   name: nginx
 spec:
+  ipFamilyPolicy: PreferDualStack
   selector:
     app: nginx
   ports:

View File

@@ -1,3 +1,3 @@
-*
+/*
 !.gitignore
 !sample/

View File

@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.23.4+k3s1
+k3s_version: v1.24.6+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
@@ -17,16 +17,35 @@ apiserver_endpoint: "192.168.30.222"
 # this token should be alpha numeric only
 k3s_token: "some-SUPER-DEDEUPER-secret-password"
-# change these to your liking, the only required one is--no-deploy servicelb
-extra_server_args: "--no-deploy servicelb --no-deploy traefik"
-extra_agent_args: ""
+# The IP on which the node is reachable in the cluster.
+# Here, a sensible default is provided, you can still override
+# it for each of your hosts, though.
+k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
+# Disable the taint manually by setting: k3s_master_taint = false
+k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
+# these arguments are recommended for servers as well as agents:
+extra_args: >-
+  --flannel-iface={{ flannel_iface }}
+  --node-ip={{ k3s_node_ip }}
+# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
+extra_server_args: >-
+  {{ extra_args }}
+  {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
+  --tls-san {{ apiserver_endpoint }}
+  --disable servicelb
+  --disable traefik
+extra_agent_args: >-
+  {{ extra_args }}
 # image tag for kube-vip
-kube_vip_tag_version: "v0.4.4"
+kube_vip_tag_version: "v0.5.5"
 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.12.1"
-metal_lb_controller_tag_version: "v0.12.1"
+metal_lb_speaker_tag_version: "v0.13.6"
+metal_lb_controller_tag_version: "v0.13.6"
 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"

73
molecule/README.md Normal file
View File

@@ -0,0 +1,73 @@
# Test suites for `k3s-ansible`
This folder contains the [molecule](https://molecule.rtfd.io/)-based test setup for this playbook.
## Scenarios
We have these scenarios:
- **default**:
A 3 control + 2 worker node cluster based very closely on the [sample inventory](../inventory/sample/).
- **ipv6**:
A cluster that is externally accessible via IPv6 ([more information](ipv6/README.md))
To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
- **single_node**:
Very similar to the default scenario, but uses only a single node for all cluster functionality.
## How to execute
To test on your local machine, follow these steps:
### System requirements
Make sure that the following software packages are available on your system:
- [Python 3](https://www.python.org/downloads)
- [Vagrant](https://www.vagrantup.com/downloads)
- [VirtualBox](https://www.virtualbox.org/wiki/Downloads)
### Set up VirtualBox networking on Linux and macOS
_You can safely skip this if you are working on Windows._
Furthermore, the test cluster uses the `192.168.30.0/24` subnet which is [not set up by VirtualBox automatically](https://www.virtualbox.org/manual/ch06.html#network_hostonly).
To set the subnet up for use with VirtualBox, please make sure that `/etc/vbox/networks.conf` exists and that it contains this line:
```
* 192.168.30.0/24
* fdad:bad:ba55::/64
```
### Install Python dependencies
You will get [Molecule, Ansible and a few extra dependencies](../requirements.txt) via [pip](https://pip.pypa.io/).
Usually, it is advisable to work in a [virtual environment](https://docs.python.org/3/tutorial/venv.html) for this:
```bash
cd /path/to/k3s-ansible
# Create a virtualenv at ".env". You only need to do this once.
python3 -m venv .env
# Activate the virtualenv for your current shell session.
# If you start a new session, you will have to repeat this.
source .env/bin/activate
# Install the required packages into the virtualenv.
# These remain installed across shell sessions.
python3 -m pip install -r requirements.txt
```
### Run molecule
With the virtual environment from the previous step active in your shell session, you can now use molecule to test the playbook.
Interesting commands are:
- `molecule create`: Create virtual machines for the test cluster nodes.
- `molecule destroy`: Delete the virtual machines for the test cluster nodes.
- `molecule converge`: Run the `site` playbook on the nodes of the test cluster.
- `molecule side_effect`: Run the `reset` playbook on the nodes of the test cluster.
- `molecule verify`: Verify that the cluster works correctly.
- `molecule test`: The "all-in-one" sequence of steps that is executed in CI.
This includes the `create`, `converge`, `verify`, `side_effect` and `destroy` steps.
See [`molecule.yml`](default/molecule.yml) for more details.
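For example, a full local run of the CI sequence for one scenario, using the scenario names listed above, would look something like this:

```bash
# Inside the activated virtualenv from the previous section:
molecule test --scenario-name default

# Or iterate faster while developing against a smaller scenario:
molecule converge --scenario-name single_node
molecule verify --scenario-name single_node
```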

View File

@@ -0,0 +1,82 @@
---
dependency:
name: galaxy
driver:
name: vagrant
.platform_presets:
- &control
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
- &node
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
- &debian
box: generic/debian11
- &rocky
box: generic/rocky9
- &ubuntu
box: generic/ubuntu2204
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
platforms:
- <<: [*control, *ubuntu]
name: control1
interfaces:
- network_name: private_network
ip: 192.168.30.38
- <<: [*control, *debian]
name: control2
interfaces:
- network_name: private_network
ip: 192.168.30.39
- <<: [*control, *rocky]
name: control3
interfaces:
- network_name: private_network
ip: 192.168.30.40
- <<: [*node, *ubuntu]
name: node1
interfaces:
- network_name: private_network
ip: 192.168.30.41
- <<: [*node, *rocky]
name: node2
interfaces:
- network_name: private_network
ip: 192.168.30.42
provisioner:
name: ansible
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -0,0 +1,11 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45

View File

@@ -0,0 +1,22 @@
---
- name: Apply overrides
ansible.builtin.import_playbook: >-
{{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml
- name: Network setup
hosts: all
tasks:
- name: Disable firewalld
when: ansible_distribution == "Rocky"
# Rocky Linux comes with firewalld enabled. It blocks some of the network
# connections needed for our k3s cluster. For our test setup, we just disable
# it since the VM host's firewall is still active for connections to and from
# the Internet.
# When building your own cluster, please DO NOT blindly copy this. Instead,
# please create a custom firewall configuration that fits your network design
# and security needs.
ansible.builtin.systemd:
name: firewalld
enabled: no
state: stopped
become: true

35
molecule/ipv6/README.md Normal file
View File

@@ -0,0 +1,35 @@
# Sample IPv6 configuration for `k3s-ansible`
This scenario contains a cluster configuration which is _IPv6 first_, but still supports dual-stack networking with IPv4 for most things.
This means:
- The API server VIP is an IPv6 address.
- The MetalLB pool consists of both IPv4 and IPv6 addresses.
- Nodes as well as cluster-internal resources (pods and services) are accessible via IPv4 as well as IPv6.
## Network design
All IPv6 addresses used in this scenario share a single `/48` prefix: `fdad:bad:ba55`.
The following subnets are used:
- `fdad:bad:ba55:`**`0`**`::/64` is the subnet which contains the cluster components meant for external access.
That includes:
- The VIP for the Kubernetes API server: `fdad:bad:ba55::333`
- Services load-balanced by MetalLB: `fdad:bad:ba55::1b:0/112`
- Cluster nodes: `fdad:bad:ba55::de:0/112`
- The host executing Vagrant: `fdad:bad:ba55::1`
In a home lab setup, this might be your LAN.
- `fdad:bad:ba55:`**`4200`**`::/56` is used internally by the cluster for pods.
- `fdad:bad:ba55:`**`4300`**`::/108` is used internally by the cluster for services.
IPv4 networking is also available:
- The nodes have addresses inside `192.168.123.0/24`.
MetalLB also has a bit of address space in this range: `192.168.123.80-192.168.123.90`
- For pods and services, the k3s defaults (`10.42.0.0/16` and `10.43.0.0/16`) are used.
Note that the host running Vagrant is not part of any of these IPv4 networks.

View File

@@ -0,0 +1,3 @@
---
node_ipv4: 192.168.123.11
node_ipv6: fdad:bad:ba55::de:11

View File

@@ -0,0 +1,3 @@
---
node_ipv4: 192.168.123.21
node_ipv6: fdad:bad:ba55::de:21

View File

@@ -0,0 +1,63 @@
---
dependency:
name: galaxy
driver:
name: vagrant
.platform_presets:
- &control
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
- &node
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
- &ubuntu
box: generic/ubuntu2204
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
platforms:
- <<: [*control, *ubuntu]
name: control1
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:11
- <<: [*node, *ubuntu]
name: node1
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:21
provisioner:
name: ansible
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -0,0 +1,45 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables (1/2)
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# IPv6 configuration
# ######################################################################
# The API server will be reachable on IPv6 only
apiserver_endpoint: fdad:bad:ba55::333
# We give MetalLB address space for both IPv4 and IPv6
metal_lb_ip_range:
- fdad:bad:ba55::1b:0/112
- 192.168.123.80-192.168.123.90
# k3s_node_ip is by default set to the IPv4 address of flannel_iface.
# We want IPv6 addresses here of course, so we just specify them
# manually below.
k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}"
- name: Override host variables (2/2)
# Since "extra_args" depends on "k3s_node_ip" and "flannel_iface" we have
# to set this AFTER overriding the both of them.
ansible.builtin.set_fact:
# A few extra server args are necessary:
# - the network policy needs to be disabled.
# - we need to manually specify the subnets for services and pods, as
# the default has IPv4 ranges only.
extra_server_args: >-
{{ extra_args }}
--tls-san {{ apiserver_endpoint }}
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
--disable servicelb
--disable traefik
--disable-network-policy
--cluster-cidr=10.42.0.0/16,fdad:bad:ba55:4200::/56
--service-cidr=10.43.0.0/16,fdad:bad:ba55:4300::/108

51
molecule/ipv6/prepare.yml Normal file
View File

@@ -0,0 +1,51 @@
---
- name: Apply overrides
ansible.builtin.import_playbook: >-
{{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml
- name: Configure dual-stack networking
hosts: all
become: true
# Unfortunately, as of 2022-09, Vagrant does not support the configuration
# of both IPv4 and IPv6 addresses for a single network adapter. So we have
# to configure that ourselves.
# Moreover, we have to explicitly enable IPv6 for the loopback interface.
tasks:
- name: Enable IPv6 for network interfaces
ansible.posix.sysctl:
name: net.ipv6.conf.{{ item }}.disable_ipv6
value: "0"
with_items:
- all
- default
- lo
- name: Disable duplicate address detection
# Duplicate address detection did repeatedly fail within the virtual
# network. But since this setup does not use SLAAC anyway, we can safely
# disable it.
ansible.posix.sysctl:
name: net.ipv6.conf.{{ item }}.accept_dad
value: "0"
with_items:
- "{{ flannel_iface }}"
- name: Write IPv4 configuration
ansible.builtin.template:
src: 55-flannel-ipv4.yaml.j2
dest: /etc/netplan/55-flannel-ipv4.yaml
owner: root
group: root
mode: 0644
register: netplan_template
- name: Apply netplan configuration
# Conceptually, this should be a handler rather than a task.
# However, we are currently not in a role context - creating
# one just for this seemed overkill.
when: netplan_template.changed
ansible.builtin.command:
cmd: netplan apply
changed_when: true

View File

@@ -0,0 +1,8 @@
---
network:
version: 2
renderer: networkd
ethernets:
{{ flannel_iface }}:
addresses:
- {{ node_ipv4 }}/24

View File

@@ -0,0 +1,7 @@
---
- name: Apply overrides
ansible.builtin.import_playbook: >-
{{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml
- name: Converge
ansible.builtin.import_playbook: ../../site.yml

View File

@@ -0,0 +1,7 @@
---
- name: Apply overrides
ansible.builtin.import_playbook: >-
{{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml
- name: Reset
ansible.builtin.import_playbook: ../../reset.yml

View File

@@ -0,0 +1,5 @@
---
- name: Verify
hosts: all
roles:
- verify/from_outside

View File

@@ -0,0 +1,9 @@
---
# A host outside of the cluster from which the checks shall be performed
outside_host: localhost
# This kubernetes namespace will be used for testing
testing_namespace: molecule-verify-from-outside
# The directory in which the example manifests reside
example_manifests_path: ../../../../example

View File

@@ -0,0 +1,5 @@
---
- name: Clean up kubecfg
ansible.builtin.file:
path: "{{ kubecfg.path }}"
state: absent

View File

@@ -0,0 +1,19 @@
---
- name: Create temporary directory for kubecfg
ansible.builtin.tempfile:
state: directory
suffix: kubecfg
register: kubecfg
- name: Gathering facts
delegate_to: "{{ groups['master'][0] }}"
ansible.builtin.gather_facts:
- name: Download kubecfg
ansible.builtin.fetch:
src: "{{ ansible_env.HOME }}/.kube/config"
dest: "{{ kubecfg.path }}/"
flat: true
delegate_to: "{{ groups['master'][0] }}"
delegate_facts: true
- name: Store path to kubecfg
ansible.builtin.set_fact:
kubecfg_path: "{{ kubecfg.path }}/config"

View File

@@ -0,0 +1,14 @@
---
- name: Verify
run_once: true
delegate_to: "{{ outside_host }}"
block:
- name: "Test CASE: Get kube config"
ansible.builtin.import_tasks: kubecfg-fetch.yml
- name: "TEST CASE: Get nodes"
ansible.builtin.include_tasks: test/get-nodes.yml
- name: "TEST CASE: Deploy example"
ansible.builtin.include_tasks: test/deploy-example.yml
always:
- name: "TEST CASE: Cleanup"
ansible.builtin.import_tasks: kubecfg-cleanup.yml

View File

@@ -0,0 +1,58 @@
---
- name: Deploy example
block:
- name: "Create namespace: {{ testing_namespace }}"
kubernetes.core.k8s:
api_version: v1
kind: Namespace
name: "{{ testing_namespace }}"
state: present
wait: true
kubeconfig: "{{ kubecfg_path }}"
- name: Apply example manifests
kubernetes.core.k8s:
src: "{{ example_manifests_path }}/{{ item }}"
namespace: "{{ testing_namespace }}"
state: present
wait: true
kubeconfig: "{{ kubecfg_path }}"
with_items:
- deployment.yml
- service.yml
- name: Get info about nginx service
kubernetes.core.k8s_info:
kind: service
name: nginx
namespace: "{{ testing_namespace }}"
kubeconfig: "{{ kubecfg_path }}"
vars: &load_balancer_metadata
metallb_ip: status.loadBalancer.ingress[0].ip
metallb_port: spec.ports[0].port
register: nginx_services
- name: Assert that the nginx welcome page is available
ansible.builtin.uri:
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
return_content: yes
register: result
failed_when: "'Welcome to nginx!' not in result.content"
vars:
ip: >-
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
port: >-
{{ nginx_services.resources[0].spec.ports[0].port }}
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
always:
- name: "Remove namespace: {{ testing_namespace }}"
kubernetes.core.k8s:
api_version: v1
kind: Namespace
name: "{{ testing_namespace }}"
state: absent
kubeconfig: "{{ kubecfg_path }}"

View File

@@ -0,0 +1,28 @@
---
- name: Get all nodes in cluster
kubernetes.core.k8s_info:
kind: node
kubeconfig: "{{ kubecfg_path }}"
register: cluster_nodes
- name: Assert that the cluster contains exactly the expected nodes
ansible.builtin.assert:
that: found_nodes == expected_nodes
success_msg: "Found nodes as expected: {{ found_nodes }}"
fail_msg: "Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}"
vars:
found_nodes: >-
{{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }}
expected_nodes: |-
{{
(
( groups['master'] | default([]) ) +
( groups['node'] | default([]) )
)
| unique
| sort
}}
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]

View File

@@ -0,0 +1,48 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.50
provisioner:
name: ansible
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

View File

@@ -0,0 +1,15 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the default scenario
apiserver_endpoint: "192.168.30.223"
metal_lb_ip_range: "192.168.30.91-192.168.30.99"

72
requirements.txt Normal file
View File

@@ -0,0 +1,72 @@
ansible-compat==2.2.1
ansible-core==2.13.5
ansible-lint==6.8.4
arrow==1.2.3
attrs==22.1.0
binaryornot==0.4.4
black==22.10.0
bracex==2.3.post1
cachetools==5.2.0
Cerberus==1.3.2
certifi==2022.9.24
cffi==1.15.1
chardet==5.0.0
charset-normalizer==2.1.1
click==8.1.3
click-help-colors==0.9.1
commonmark==0.9.1
cookiecutter==2.1.1
cryptography==38.0.1
distro==1.8.0
enrich==1.2.7
filelock==3.8.0
google-auth==2.13.0
idna==3.4
importlib-resources==5.10.0
Jinja2==3.1.2
jinja2-time==0.2.0
jmespath==1.0.1
jsonpatch==1.32
jsonpointer==2.3
jsonschema==4.16.0
kubernetes==24.2.0
MarkupSafe==2.1.1
molecule==4.0.1
molecule-vagrant==1.0.0
mypy-extensions==0.4.3
netaddr==0.8.0
oauthlib==3.2.2
packaging==21.3
pathspec==0.10.1
pkgutil-resolve-name==1.3.10
platformdirs==2.5.2
pluggy==1.0.0
pre-commit==2.20.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.21
Pygments==2.13.0
pyparsing==3.0.9
pyrsistent==0.18.1
python-dateutil==2.8.2
python-slugify==6.1.2
python-vagrant==1.0.0
PyYAML==6.0
requests==2.28.1
requests-oauthlib==1.3.1
resolvelib==0.8.1
rich==12.6.0
rsa==4.9
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.7
selinux==0.2.1
six==1.16.0
subprocess-tee==0.3.5
text-unidecode==1.3
tomli==2.0.1
typing-extensions==4.4.0
urllib3==1.26.12
wcmatch==8.4.1
websocket-client==1.4.1
yamllint==1.28.0
zipp==3.10.0

View File

@@ -5,3 +5,9 @@
   become: yes
   roles:
     - role: reset
+    - role: raspberrypi
+      vars: {state: absent}
+  post_tasks:
+    - name: Reboot and wait for node to come back up
+      reboot:
+        reboot_timeout: 3600

View File

@@ -2,10 +2,10 @@
 ansible_user: root
 server_init_args: >-
   {% if groups['master'] | length > 1 %}
-  {% if ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) %}
+  {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
   --cluster-init
   {% else %}
-  --server https://{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}:6443
+  --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
   {% endif %}
   --token {{ k3s_token }}
   {% endif %}

View File

@@ -0,0 +1,28 @@
---
# Download logs of k3s-init.service from the nodes to localhost.
# Note that log_destination must be set.
- name: Fetch k3s-init.service logs
ansible.builtin.command:
cmd: journalctl --all --unit=k3s-init.service
changed_when: false
register: k3s_init_log
- name: Create {{ log_destination }}
delegate_to: localhost
run_once: true
become: false
ansible.builtin.file:
path: "{{ log_destination }}"
state: directory
mode: "0755"
- name: Store logs to {{ log_destination }}
delegate_to: localhost
become: false
ansible.builtin.template:
src: content.j2
dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
mode: 0644
vars:
content: "{{ k3s_init_log.stdout }}"

View File

@@ -20,16 +20,16 @@
     owner: root
     group: root
     mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
 - name: Copy vip rbac manifest to first master
   template:
     src: "vip.rbac.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip.rbac.yaml"
+    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
     owner: root
     group: root
     mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
 - name: Copy vip manifest to first master
   template:
@@ -38,34 +38,26 @@
     owner: root
     group: root
     mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-- name: Copy metallb namespace manifest to first master
+# these will be copied and installed now, then tested later and apply config
+- name: Copy metallb namespace to first master
   template:
     src: "metallb.namespace.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb.namespace.yaml"
+    dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
     owner: root
     group: root
     mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-- name: Copy metallb ConfigMap manifest to first master
+- name: Copy metallb namespace to first master
   template:
-    src: "metallb.configmap.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb.configmap.yaml"
+    src: "metallb.crds.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
     owner: root
     group: root
     mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-- name: Copy metallb main manifest to first master
-  template:
-    src: "metallb.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
 - name: Init cluster inside the transient k3s-init service
   command:
@@ -88,11 +80,18 @@
delay: 10 delay: 10
changed_when: false changed_when: false
always: always:
- name: Save logs of k3s-init.service
include_tasks: fetch_k3s_init_logs.yml
when: log_destination
vars:
log_destination: >-
{{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
- name: Kill the temporary service used for initialization - name: Kill the temporary service used for initialization
systemd: systemd:
name: k3s-init name: k3s-init
state: stopped state: stopped
failed_when: false failed_when: false
when: not ansible_check_mode
- name: Copy K3s service file - name: Copy K3s service file
register: k3s_service register: k3s_service
@@ -153,12 +152,19 @@
owner: "{{ ansible_user }}" owner: "{{ ansible_user }}"
mode: "u=rw,g=,o=" mode: "u=rw,g=,o="
- name: Configure kubectl cluster to https://{{ apiserver_endpoint }}:6443 - name: Configure kubectl cluster to {{ endpoint_url }}
command: >- command: >-
k3s kubectl config set-cluster default k3s kubectl config set-cluster default
--server=https://{{ apiserver_endpoint }}:6443 --server={{ endpoint_url }}
--kubeconfig ~{{ ansible_user }}/.kube/config --kubeconfig ~{{ ansible_user }}/.kube/config
changed_when: true changed_when: true
vars:
endpoint_url: >-
https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
- name: Create kubectl symlink - name: Create kubectl symlink
file: file:
@@ -171,3 +177,25 @@
src: /usr/local/bin/k3s src: /usr/local/bin/k3s
dest: /usr/local/bin/crictl dest: /usr/local/bin/crictl
state: link state: link
- name: Get contents of manifests folder
find:
paths: /var/lib/rancher/k3s/server/manifests
file_type: file
register: k3s_server_manifests
- name: Get sub dirs of manifests folder
find:
paths: /var/lib/rancher/k3s/server/manifests
file_type: directory
register: k3s_server_manifests_directories
- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
file:
path: "{{ item.path }}"
state: absent
with_items:
- "{{ k3s_server_manifests.files }}"
- "{{ k3s_server_manifests_directories.files }}"
loop_control:
label: "{{ item.path }}"

View File

@@ -0,0 +1,5 @@
{#
This is a really simple template that just outputs the
value of the "content" variable.
#}
{{ content }}

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- {{ metal_lb_ip_range }}

File diff suppressed because it is too large

View File

@@ -4,4 +4,3 @@ metadata:
name: metallb-system name: metallb-system
labels: labels:
app: metallb app: metallb

View File

@@ -1,481 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: controller
spec:
allowPrivilegeEscalation: false
allowedCapabilities: []
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65535
min: 1
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: speaker
spec:
allowPrivilegeEscalation: false
allowedCapabilities:
- NET_RAW
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
hostIPC: false
hostNetwork: true
hostPID: false
hostPorts:
- max: 7472
min: 7472
- max: 7946
min: 7946
privileged: true
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:controller
rules:
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services/status
verbs:
- update
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- controller
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:speaker
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- speaker
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- pods
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- create
- apiGroups:
- ''
resources:
- secrets
resourceNames:
- memberlist
verbs:
- list
- apiGroups:
- apps
resources:
- deployments
resourceNames:
- controller
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pod-lister
subjects:
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: controller
subjects:
- kind: ServiceAccount
name: controller
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: metallb
component: speaker
name: speaker
namespace: metallb-system
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: speaker
spec:
containers:
- args:
- --port=7472
- --config=config
- --log-level=info
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: METALLB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: METALLB_ML_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
# needed when another software is also using memberlist / port 7946
# when changing this default you also need to update the container ports definition
# and the PodSecurityPolicy hostPorts definition
#- name: METALLB_ML_BIND_PORT
# value: "7946"
- name: METALLB_ML_LABELS
value: "app=metallb,component=speaker"
- name: METALLB_ML_SECRET_KEY
valueFrom:
secretKeyRef:
name: memberlist
key: secretkey
image: quay.io/metallb/speaker:{{ metal_lb_speaker_tag_version }}
name: speaker
ports:
- containerPort: 7472
name: monitoring
- containerPort: 7946
name: memberlist-tcp
- containerPort: 7946
name: memberlist-udp
protocol: UDP
livenessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_RAW
drop:
- ALL
readOnlyRootFilesystem: true
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: speaker
terminationGracePeriodSeconds: 2
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: metallb
component: controller
name: controller
namespace: metallb-system
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: controller
spec:
containers:
- args:
- --port=7472
- --config=config
- --log-level=info
env:
- name: METALLB_ML_SECRET_NAME
value: memberlist
- name: METALLB_DEPLOYMENT
value: controller
image: quay.io/metallb/controller:{{ metal_lb_controller_tag_version }}
name: controller
ports:
- containerPort: 7472
name: monitoring
livenessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
fsGroup: 65534
serviceAccountName: controller
terminationGracePeriodSeconds: 0

View File

@@ -12,7 +12,7 @@ metadata:
name: system:kube-vip-role name: system:kube-vip-role
rules: rules:
- apiGroups: [""] - apiGroups: [""]
resources: ["services", "services/status", "nodes"] resources: ["services", "services/status", "nodes", "endpoints"]
verbs: ["list","get","watch", "update"] verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"] - apiGroups: ["coordination.k8s.io"]
resources: ["leases"] resources: ["leases"]
@@ -30,4 +30,3 @@ subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: kube-vip name: kube-vip
namespace: kube-system namespace: kube-system

View File

@@ -1,7 +1,6 @@
apiVersion: apps/v1 apiVersion: apps/v1
kind: DaemonSet kind: DaemonSet
metadata: metadata:
creationTimestamp: null
name: kube-vip-ds name: kube-vip-ds
namespace: kube-system namespace: kube-system
spec: spec:
@@ -10,7 +9,6 @@ spec:
name: kube-vip-ds name: kube-vip-ds
template: template:
metadata: metadata:
creationTimestamp: null
labels: labels:
name: kube-vip-ds name: kube-vip-ds
spec: spec:
@@ -35,7 +33,7 @@ spec:
- name: vip_interface - name: vip_interface
value: {{ flannel_iface }} value: {{ flannel_iface }}
- name: vip_cidr - name: vip_cidr
value: "32" value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
- name: cp_enable - name: cp_enable
value: "true" value: "true"
- name: cp_namespace - name: cp_namespace
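The new vip_cidr expression derives the prefix length from apiserver_endpoint instead of hard-coding 32. A minimal sketch of the filter chain, again assuming ansible.utils and netaddr are available and using illustrative addresses:

- name: Show how the vip_cidr value is derived (illustrative only)
  ansible.builtin.debug:
    msg: "{{ item | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
  loop:
    - 192.168.30.222     # bare IPv4 -> 192.168.30.222/32 -> 32
    - 2001:db8::222      # bare IPv6 -> 2001:db8::222/128 -> 128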

View File

@@ -7,7 +7,7 @@ After=network-online.target
Type=notify Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }} ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process KillMode=process
Delegate=yes Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead # Having non-zero Limit*s causes performance problems due to accounting overhead

View File

@@ -0,0 +1,3 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s

View File

@@ -0,0 +1,94 @@
---
- name: Create manifests directory for temp configuration
file:
path: /tmp/k3s
state: directory
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for MetalLB resources
command: >-
k3s kubectl wait {{ item.resource }}
--namespace='metallb-system'
{% if item.name | default(False) -%}{{ item.name }}{%- endif %}
{% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
{% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
--timeout='{{ metal_lb_available_timeout }}'
changed_when: false
run_once: true
with_items:
- description: controller
resource: deployment
name: controller
condition: --for condition=Available=True
- description: webhook service
resource: pod
selector: component=controller
condition: --for=jsonpath='{.status.phase}'=Running
- description: pods in replica sets
resource: pod
selector: component=controller,app=metallb
condition: --for condition=Ready
- description: ready replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.readyReplicas}'=1
- description: fully labeled replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
- description: available replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.availableReplicas}'=1
loop_control:
label: "{{ item.description }}"
- name: Test metallb-system webhook-service endpoint
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Apply metallb CRs
command: >-
k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
--timeout='{{ metal_lb_available_timeout }}'
register: this
changed_when: false
run_once: true
until: this.rc == 0
retries: 5
- name: Test metallb-system resources
command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
with_items:
- IPAddressPool
- L2Advertisement
- name: Remove tmp directory used for manifests
file:
path: /tmp/k3s
state: absent
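To make the wait loop above easier to read, here is roughly what its first with_items entry (the controller Deployment) expands to, using the 120s metal_lb_available_timeout default shown earlier; this is a sketch of the rendered command, not an additional task in the role:

- name: Wait for the MetalLB controller deployment to become Available (illustrative expansion)
  ansible.builtin.command: >-
    k3s kubectl wait deployment
    --namespace='metallb-system'
    controller
    --for condition=Available=True
    --timeout='120s'
  changed_when: false
  run_once: true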

View File

@@ -0,0 +1,21 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: first-pool
namespace: metallb-system
spec:
addresses:
{% if metal_lb_ip_range is string %}
{# metal_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set metal_lb_ip_range = [metal_lb_ip_range] %}
{% endif %}
{% for range in metal_lb_ip_range %}
- {{ range }}
{% endfor %}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: default
namespace: metallb-system
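For reference, with a single legacy-style metal_lb_ip_range such as 192.168.30.80-192.168.30.90 (the value used by the old Vagrantfile at the end of this compare), the template above renders roughly to:

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.30.80-192.168.30.90
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system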

View File

@@ -23,6 +23,13 @@
state: present state: present
reload: yes reload: yes
- name: Enable IPv6 router advertisements
sysctl:
name: net.ipv6.conf.all.accept_ra
value: "2"
state: present
reload: yes
- name: Add br_netfilter to /etc/modules-load.d/ - name: Add br_netfilter to /etc/modules-load.d/
copy: copy:
content: "br_netfilter" content: "br_netfilter"

View File

@@ -0,0 +1,6 @@
---
# Indicates whether the k3s prerequisites for Raspberry Pi should be set up
# Possible values:
# - present
# - absent
state: present

View File

@@ -1,3 +1,3 @@
--- ---
- name: reboot - name: Reboot
reboot: reboot:

View File

@@ -47,13 +47,20 @@
- raspberry_pi|default(false) - raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye") - ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: execute OS related tasks on the Raspberry Pi - name: execute OS related tasks on the Raspberry Pi - {{ action }}
include_tasks: "{{ item }}" include_tasks: "{{ item }}"
with_first_found: with_first_found:
- "prereq/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml" - "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "prereq/{{ detected_distribution }}.yml" - "{{ action }}/{{ detected_distribution }}.yml"
- "prereq/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml" - "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "prereq/{{ ansible_distribution }}.yml" - "{{ action }}/{{ ansible_distribution }}.yml"
- "prereq/default.yml" - "{{ action }}/default.yml"
vars:
action: >-
{% if state == "present" -%}
setup
{%- else -%}
teardown
{%- endif %}
when: when:
- raspberry_pi|default(false) - raspberry_pi|default(false)
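As an illustrative sketch of how the new action variable changes file resolution (the Ubuntu 22 host is hypothetical):

# With state: absent the action resolves to "teardown", so with_first_found tries, in order:
#   teardown/Ubuntu-22.yml
#   teardown/Ubuntu.yml
#   teardown/default.yml   (the empty "---" fallback files added in this change)
# With state: present the action resolves to "setup" and the same order applies under setup/.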

View File

@@ -13,7 +13,6 @@
- name: Flush iptables before changing to iptables-legacy - name: Flush iptables before changing to iptables-legacy
iptables: iptables:
flush: true flush: true
changed_when: false # iptables flush always returns changed
- name: Changing to iptables-legacy - name: Changing to iptables-legacy
alternatives: alternatives:

View File

@@ -1,8 +1,9 @@
--- ---
- name: Enable cgroup via boot commandline if not already enabled for Centos - name: Enable cgroup via boot commandline if not already enabled for Rocky
lineinfile: lineinfile:
path: /boot/cmdline.txt path: /boot/cmdline.txt
backrefs: yes backrefs: yes
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$' regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory' line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
notify: reboot notify: reboot
when: not ansible_check_mode

View File

@@ -6,3 +6,8 @@
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$' regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory' line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
notify: reboot notify: reboot
- name: Install linux-modules-extra-raspi
apt:
name: linux-modules-extra-raspi
state: present

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,5 @@
---
- name: Remove linux-modules-extra-raspi
apt:
name: linux-modules-extra-raspi
state: absent

View File

@@ -0,0 +1 @@
---

View File

@@ -10,7 +10,7 @@
- k3s-node - k3s-node
- k3s-init - k3s-init
- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc" - name: RUN pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
register: pkill_containerd_shim_runc register: pkill_containerd_shim_runc
command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc" command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
changed_when: "pkill_containerd_shim_runc.rc == 0" changed_when: "pkill_containerd_shim_runc.rc == 0"
@@ -44,13 +44,13 @@
- /var/lib/kubelet - /var/lib/kubelet
- /var/lib/rancher/k3s - /var/lib/rancher/k3s
- /var/lib/rancher/ - /var/lib/rancher/
- /usr/local/bin/k3s
- /var/lib/cni/ - /var/lib/cni/
- name: daemon_reload - name: Reload daemon_reload
systemd: systemd:
daemon_reload: yes daemon_reload: yes
- name: Reboot and wait for node to come back up - name: Remove tmp directory used for manifests
reboot: file:
reboot_timeout: 3600 path: /tmp/k3s
state: absent

View File

@@ -17,3 +17,8 @@
become: yes become: yes
roles: roles:
- role: k3s/node - role: k3s/node
- hosts: master
become: yes
roles:
- role: k3s/post

vagrant/Vagrantfile (vendored, 79 lines changed)
View File

@@ -1,79 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
# General configuration
config.vm.box = "generic/ubuntu2110"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.ssh.insert_key = false
config.vm.provider :virtualbox do |v|
v.memory = 4096
v.cpus = 2
v.linked_clone = true
end
# Control Node 1
config.vm.define "control1" do |control1|
control1.vm.hostname = "control1"
control1.vm.network "private_network", ip: "192.168.30.38"
end
# Control Node 2
config.vm.define "control2" do |control2|
control2.vm.hostname = "control2"
control2.vm.network "private_network", ip: "192.168.30.39"
end
# Control Node 3
config.vm.define "control3" do |control3|
control3.vm.hostname = "control3"
control3.vm.network "private_network", ip: "192.168.30.40"
end
# Worker Node 1
config.vm.define "node1" do |node1|
node1.vm.hostname = "node1"
node1.vm.network "private_network", ip: "192.168.30.41"
end
# Worker Node 2
config.vm.define "node2" do |node2|
node2.vm.hostname = "node2"
node2.vm.network "private_network", ip: "192.168.30.42"
end
config.vm.provision "ansible",type: "ansible", run: "never" do |ansible|
ansible.playbook = "../site.yml"
ansible.limit = "all"
ansible.groups = {
"master" => ["control1", "control2", "control3"],
"node" => ["node1", "node2"],
"k3s_cluster:children" => ["master", "node"],
"k3s_cluster:vars" => {"k3s_version" => "v1.23.4+k3s1",
"ansible_user" => "vagrant",
"systemd_dir" => "/etc/systemd/system",
"flannel_iface" => "eth1",
"apiserver_endpoint" => "192.168.30.222",
"k3s_token" => "supersecret",
"extra_server_args" => "--node-ip={{ ansible_eth1.ipv4.address }} --flannel-iface={{ flannel_iface }} --no-deploy servicelb --no-deploy traefik",
"extra_agent_args" => "--flannel-iface={{ flannel_iface }}",
"kube_vip_tag_version" => "v0.4.2",
"metal_lb_speaker_tag_version" => "v0.12.1",
"metal_lb_controller_tag_version" => "v0.12.1",
"metal_lb_ip_range" => "192.168.30.80-192.168.30.90",
"retry_count" => "30"}
}
ansible.host_vars = {
"control1" => {
"server_init_args" => "--cluster-init --token {{ k3s_token }} {{ extra_server_args | default('') }}"
},
"control2" => {
"server_init_args" => "--server https://192.168.30.38:6443 --token {{ k3s_token }} {{ extra_server_args | default('') }}"
},
"control3" => {
"server_init_args" => "--server https://192.168.30.38:6443 --token {{ k3s_token }} {{ extra_server_args | default('') }}"
}
}
end
end