Compare commits

..

57 Commits

Author SHA1 Message Date
Timothy Stewart 2ae9ee82f0 fix(ci): pin + cache 2022-11-02 21:27:35 -05:00
Timothy Stewart 5380f93b64 fix(ci): pin + cache 2022-11-02 21:20:33 -05:00
Timothy Stewart a414453ad4 fix(ci): pin + cache 2022-11-02 21:11:31 -05:00
Timothy Stewart 23c5d9ec89 fix(ci): pin + cache 2022-11-02 21:00:37 -05:00
Timothy Stewart 6b339e1985 fix(ci): pin + cache 2022-11-02 20:55:40 -05:00
Timothy Stewart a031807660 fix(ci): pin + cache 2022-11-02 20:15:03 -05:00
Timothy Stewart 7dd305aabc fix(ci): pin + cache 2022-11-02 20:00:27 -05:00
Timothy Stewart 500931e2fd fix(ci): pin + cache 2022-11-02 19:55:06 -05:00
Timothy Stewart cf357cf164 fix(ci): pin + cache 2022-11-02 19:49:32 -05:00
Timothy Stewart 215e0d10ed fix(ci): pin + cache 2022-11-02 19:49:03 -05:00
Timothy Stewart c6ed680dc1 fix(ci): pin + cache 2022-11-02 19:41:30 -05:00
Timothy Stewart 8343a6199e fix(ci): pin + cache 2022-11-02 19:40:11 -05:00
Timothy Stewart b524f97552 fix(ci): pin + cache 2022-11-02 19:38:39 -05:00
Timothy Stewart f741040e44 fix(ci): pin + cache 2022-11-02 19:32:26 -05:00
Timothy Stewart 09bc628ba6 fix(ci): pin + cache 2022-11-01 22:55:42 -05:00
Timothy Stewart 71ff6b86cd fix(ci): pin + cache 2022-11-01 22:43:56 -05:00
Timothy Stewart 23729ddbbe fix(ci): pin + cache 2022-11-01 22:35:39 -05:00
Timothy Stewart e254c407f0 fix(ci): pin + cache 2022-11-01 22:18:39 -05:00
Timothy Stewart 713b4694e1 fix(ci): pin + cache 2022-11-01 22:05:37 -05:00
Timothy Stewart 952d513124 fix(ci): pin + cache 2022-11-01 21:45:47 -05:00
Timothy Stewart dd1e596332 fix(ci): pin + cache 2022-11-01 21:43:00 -05:00
Timothy Stewart 6af47f96d0 fix(ci): pin + cache 2022-11-01 21:24:03 -05:00
Timothy Stewart 664deec6c3 fix(ci): pin + cache 2022-11-01 21:06:21 -05:00
Timothy Stewart 646459e7f5 fix(ci): pin + cache 2022-11-01 21:05:57 -05:00
Timothy Stewart 64242d9729 fix(ci): pin + cache 2022-11-01 21:05:11 -05:00
Timothy Stewart f4864ddb64 fix(ci): pin + cache 2022-11-01 21:04:23 -05:00
Timothy Stewart 6a83cde0c6 fix(ci): pin + cache 2022-11-01 21:03:27 -05:00
Timothy Stewart 77ac928c0d fix(ci): pin + cache 2022-11-01 21:01:47 -05:00
Timothy Stewart 8300a7aaac fix(ci): pin + cache 2022-11-01 21:01:14 -05:00
Timothy Stewart bdc6af5f46 fix(ci): pin + cache 2022-11-01 20:47:50 -05:00
Timothy Stewart dc8276157a fix(ci): pin + cache 2022-11-01 20:37:23 -05:00
Timothy Stewart 37f0cb11d2 fix(ci): pin + cache 2022-11-01 20:35:46 -05:00
Timothy Stewart 68e7c77b22 fix(ci): pin + cache 2022-11-01 20:26:13 -05:00
Timothy Stewart d82c4feac8 feat(gh-actions-controller): added 2022-11-01 20:22:07 -05:00
Timothy Stewart 9217d8607b feat(gh-actions-controller): added 2022-11-01 20:19:00 -05:00
Timothy Stewart fbc15aa1a1 fix(ci): pin + cache 2022-11-01 20:15:03 -05:00
Timothy Stewart b55ec046ad fix(ci): pin + cache 2022-11-01 20:07:15 -05:00
Timothy Stewart b3cc178045 fix(ci): pin + cache 2022-11-01 19:59:22 -05:00
Timothy Stewart 13be424187 fix(ci): pin + cache 2022-11-01 19:55:33 -05:00
Timothy Stewart d9cecd5364 fix(ci): pin + cache 2022-11-01 19:51:32 -05:00
Timothy Stewart afb96dbee2 fix(ci): pin + cache 2022-11-01 19:48:31 -05:00
Timothy Stewart 30ffc69192 fix(ci): pin + cache 2022-11-01 19:41:44 -05:00
Timothy Stewart 94e385c28e fix(ci): pin + cache 2022-11-01 19:40:28 -05:00
Timothy Stewart dbb2cda17a fix(ci): pin + cache 2022-10-31 22:10:31 -05:00
Timothy Stewart d24cdb97db feat(gh-actions-controller): added 2022-10-31 22:09:33 -05:00
Timothy Stewart 5bebec930b feat(gh-actions-controller): added 2022-10-31 22:02:16 -05:00
Timothy Stewart ac52acdec1 feat(gh-actions-controller): added 2022-10-31 22:01:39 -05:00
Timothy Stewart 105b2c2f1e fix(ci): pin + cache 2022-10-31 21:55:51 -05:00
Timothy Stewart d20f485fca fix(ci): pin + cache 2022-10-31 21:47:33 -05:00
Timothy Stewart f9bb9dabae fix(ci): pin + cache 2022-10-31 21:45:11 -05:00
Timothy Stewart 6f15ef260e fix(ci): pin + cache 2022-10-31 21:40:25 -05:00
Timothy Stewart de1966fe02 fix(ci): pin + cache 2022-10-31 21:33:47 -05:00
Timothy Stewart fc823122d8 fix(script): convert to linux 2022-10-31 21:29:24 -05:00
Techno Tim 2f8d94bb5e Merge branch 'master' into self-hosted-runners 2022-10-31 18:52:22 -05:00
Timothy Stewart 9c3814ce72 feat(gh-actions-controller): added 2022-10-30 22:45:59 -05:00
Timothy Stewart 0e60f4643b feat(gh-actions-controller): added 2022-10-30 22:44:13 -05:00
Timothy Stewart bb20514a6a feat(ci): switching to self-hosted runners 2022-10-30 20:46:14 -05:00
25 changed files with 289 additions and 487 deletions

View File

@@ -11,5 +11,5 @@
 - [ ] Ran `site.yml` playbook
 - [ ] Ran `reset.yml` playbook
 - [ ] Did not add any unnecessary changes
-- [ ] Ran pre-commit install at least once before committing
 - [ ] 🚀
+- [ ] Ran pre-commit install at least once before committing

View File

@@ -5,10 +5,15 @@
 # already present on the system.
 set -euo pipefail
+YQ_VERSION=v4.29.2
+YQ_BINARY=yq_linux_amd64
 GIT_ROOT=$(git rev-parse --show-toplevel)
 PROVIDER=virtualbox
+# get yq used for filtering
+sudo wget https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/${YQ_BINARY} -O /usr/bin/yq &&\
+sudo chmod +x /usr/bin/yq
 # Read all boxes for all platforms from the "molecule.yml" files
 all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
   yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
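For context, the yq filter above reads the `box` property of every entry under `platforms` in each scenario's molecule.yml. A minimal sketch of the shape it expects, using box names that appear later in this compare (the real files carry more fields per platform):

platforms:
  - name: control1
    box: generic/ubuntu2204
  - name: control2
    box: generic/debian11
  - name: node2
    box: generic/rocky9

The deduplicated box list is presumably what the rest of the script pre-downloads before the Molecule runs start.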

View File

@@ -1,15 +0,0 @@
---
name: "CI"
on:
pull_request:
push:
branches:
- master
paths-ignore:
- '**/README.md'
jobs:
lint:
uses: ./.github/workflows/lint.yml
test:
uses: ./.github/workflows/test.yml
needs: [lint]

View File

@@ -1,22 +1,27 @@
 ---
 name: Linting
 on:
-  workflow_call:
+  pull_request:
+  push:
+    branches:
+      - master
+    paths-ignore:
+      - '**/README.md'
 jobs:
   pre-commit-ci:
     name: Pre-Commit
-    runs-on: ubuntu-latest
+    runs-on: self-hosted
     env:
       PYTHON_VERSION: "3.10"
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # 4.3.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
@@ -56,12 +61,12 @@ jobs:
   ensure-pinned-actions:
     name: Ensure SHA Pinned Actions
-    runs-on: ubuntu-latest
+    runs-on: self-hosted
     steps:
       - name: Checkout code
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
       - name: Ensure SHA pinned actions
-        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@6ca5574367befbc9efdb2fa25978084159c5902d # 1.3.0
         with:
           allowlist: |
             aws-actions/
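Both sides of this workflow keep third-party actions pinned to a full commit SHA with the human-readable version recorded in a trailing comment, which is the convention the ensure-pinned-actions job enforces (subject to the allowlist). A minimal sketch of the pattern, reusing the checkout pin from the new side of this diff:

steps:
  - name: Checkout code
    # a full 40-character SHA is immutable; the comment records the matching release
    uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2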

View File

@@ -1,11 +1,17 @@
 ---
 name: Test
 on:
-  workflow_call:
+  pull_request:
+  push:
+    branches:
+      - master
+    paths-ignore:
+      - '**/README.md'
 jobs:
   molecule:
     name: Molecule
-    runs-on: macos-12
+    runs-on: self-hosted
     strategy:
       matrix:
         scenario:
@@ -15,13 +21,40 @@ jobs:
       fail-fast: false
     env:
       PYTHON_VERSION: "3.10"
+      VAGRANT_DEFAULT_PROVIDER: virtualbox
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}
+      - name: Install Virtual Box from Oracle
+        run: |
+          echo "::group::Virtual Box"
+          wget -O- https://www.virtualbox.org/download/oracle_vbox_2016.asc | sudo gpg --dearmor --yes --output /usr/share/keyrings/oracle-virtualbox-2016.gpg
+          echo "deb [arch=amd64 signed-by=/usr/share/keyrings/oracle-virtualbox-2016.gpg] https://download.virtualbox.org/virtualbox/debian $(lsb_release -cs) contrib" | sudo tee -a /etc/apt/sources.list.d/virtualbox.list
+          sudo apt update && sudo apt install -y linux-headers-generic linux-headers-5.15.0-52-generic build-essential dkms virtualbox-dkms virtualbox-6.1
+          echo "::endgroup::"
+          echo "::group::Virtual Box Test"
+          vboxmanage --version
+          sudo /sbin/vboxconfig
+          sudo modprobe vboxdrv
+          vboxmanage --version
+          echo "::endgroup::"
+      - name: Install Vagrant
+        run: |
+          echo "::group::Install Vagrant"
+          wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg
+          echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
+          sudo apt update && sudo apt install -y vagrant
+          vagrant version
+          vagrant plugin list
+          vagrant plugin install vagrant-vbguest
+          vagrant plugin list
+          echo "::endgroup::"
       - name: Configure VirtualBox
         run: |-
           sudo mkdir -p /etc/vbox
@@ -54,7 +87,7 @@
           run: ./.github/download-boxes.sh
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # 4.3.0
         with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip' # caching pip dependencies
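The Molecule job fans out over a scenario matrix; the scenario list itself sits in an unchanged stretch of the file, so it is not visible in this hunk. A hedged sketch of what such a matrix typically looks like (the names below are placeholders; the two molecule.yml scenarios in this compare, one IPv4 and one IPv6, suggest entries like these but do not confirm them):

strategy:
  matrix:
    scenario:
      - default
      - ipv6
  fail-fast: false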

.gitignore vendored
View File

@@ -1,2 +1 @@
 .env/
-*.log

View File

@@ -1,35 +1,21 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0 # v4.3.0
+    rev: v4.3.0
     hooks:
       - id: requirements-txt-fixer
       - id: sort-simple-yaml
       - id: detect-private-key
-      - id: check-merge-conflict
-      - id: end-of-file-fixer
-      - id: mixed-line-ending
-      - id: trailing-whitespace
-        args: [--markdown-linebreak-ext=md]
   - repo: https://github.com/adrienverge/yamllint.git
-    rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2 # v1.28.0
+    rev: v1.28.0
     hooks:
       - id: yamllint
         args: [-c=.yamllint]
   - repo: https://github.com/ansible-community/ansible-lint.git
-    rev: a058554b9bcf88f12ad09ab9fb93b267a214368f # v6.8.6
+    rev: v6.8.2
     hooks:
       - id: ansible-lint
   - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a # v0.8.0.4
+    rev: v0.8.0.4
     hooks:
       - id: shellcheck
-  - repo: https://github.com/Lucas-C/pre-commit-hooks
-    rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9 # v1.3.1
-    hooks:
-      - id: remove-crlf
-      - id: remove-tabs
-  - repo: https://github.com/sirosen/texthooks
-    rev: 30d9af95631de0d7cff4e282bde9160d38bb0359 # 0.4.0
-    hooks:
-      - id: fix-smartquotes
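The change above swaps full-SHA `rev:` pins back to plain tag pins. Both forms are valid pre-commit configuration; a commit SHA is immutable while a tag can be re-pointed, which is presumably why the old side kept the tag only as a comment. A minimal sketch of the two styles for one hook repo, taken from this diff:

- repo: https://github.com/adrienverge/yamllint.git
  rev: v1.28.0  # tag pin (new side of this diff)
  # rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2  # v1.28.0, SHA pin (old side)
  hooks:
    - id: yamllint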

View File

@@ -174,4 +174,4 @@
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.
 END OF TERMS AND CONDITIONS

View File

@@ -10,7 +10,7 @@ If you want more context on how this works, see:
 📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
-📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
+📺 [Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
 ## 📖 k3s Ansible Playbook
@@ -30,12 +30,12 @@ on processor architecture:
 - Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
-- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
 - [`netaddr` package](https://pypi.org/project/netaddr/) must be available to Ansible. If you have installed Ansible via apt, this is already taken care of. If you have installed Ansible via `pip`, make sure to install `netaddr` into the respective virtual environment.
 - `server` and `agent` nodes should have passwordless SSH access, if not you can supply arguments to provide credentials `--ask-pass --ask-become-pass` to each command.
+- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml`
 ## 🚀 Getting Started
 ### 🍴 Preparation
@@ -112,13 +112,9 @@ It is run automatically in CI, but you can also run the tests locally.
 This might be helpful for quick feedback in a few cases.
 You can find more information about it [here](molecule/README.md).
-### Pre-commit Hooks
-This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
 ## Thanks 🤝
-This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:
+This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and tanks to these repos for code and ideas:
 - [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible)
 - [geerlingguy/turing-pi-cluster](https://github.com/geerlingguy/turing-pi-cluster)
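The README bullet above has users install the playbook's Ansible collections from ./collections/requirements.yml. That file is not part of this compare; a hypothetical sketch of the standard ansible-galaxy requirements format it follows (the collection names here are illustrative only, not taken from the repo):

---
collections:
  - name: community.general
  - name: ansible.utils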

View File

@@ -1,23 +1,12 @@
 [defaults]
 nocows = True
 roles_path = ./roles
 inventory = ./hosts.ini
-stdout_callback = yaml
 remote_tmp = $HOME/.ansible/tmp
 local_tmp = $HOME/.ansible/tmp
-timeout = 60
-host_key_checking = False
-deprecation_warnings = False
-callbacks_enabled = profile_tasks
-log_path = ./ansible.log
-[privilege_escalation]
-become = True
-[ssh_connection]
-scp_if_ssh = smart
-retries = 3
-ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s
-pipelining = True
-control_path = %(directory)s/%%h-%%r
+pipelining = True
+become = True
+host_key_checking = False
+deprecation_warnings = False
+callback_whitelist = profile_tasks

View File

@@ -1,3 +1,3 @@
 #!/bin/bash
 ansible-playbook site.yml -i inventory/my-cluster/hosts.ini

View File

@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.24.9+k3s1
+k3s_version: v1.24.6+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
@@ -41,11 +41,11 @@ extra_agent_args: >-
   {{ extra_args }}
 # image tag for kube-vip
-kube_vip_tag_version: "v0.5.7"
+kube_vip_tag_version: "v0.5.5"
 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.13.7"
-metal_lb_controller_tag_version: "v0.13.7"
+metal_lb_speaker_tag_version: "v0.13.6"
+metal_lb_controller_tag_version: "v0.13.6"
 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"

View File

@@ -3,73 +3,56 @@ dependency:
   name: galaxy
 driver:
   name: vagrant
-platforms:
-  - name: control1
-    box: generic/ubuntu2204
+.platform_presets:
+  - &control
     memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
       - master
+  - &node
+    memory: 2048
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - node
+  - &debian
+    box: generic/debian11
+  - &rocky
+    box: generic/rocky9
+  - &ubuntu
+    box: generic/ubuntu2204
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+platforms:
+  - <<: [*control, *ubuntu]
+    name: control1
     interfaces:
       - network_name: private_network
         ip: 192.168.30.38
-    config_options:
-      # We currently can not use public-key based authentication on Ubuntu 22.04,
-      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
-  - name: control2
-    box: generic/debian11
-    memory: 2048
-    cpus: 2
-    groups:
-      - k3s_cluster
-      - master
+  - <<: [*control, *debian]
+    name: control2
     interfaces:
       - network_name: private_network
         ip: 192.168.30.39
-  - name: control3
-    box: generic/rocky9
-    memory: 2048
-    cpus: 2
-    groups:
-      - k3s_cluster
-      - master
+  - <<: [*control, *rocky]
+    name: control3
     interfaces:
       - network_name: private_network
         ip: 192.168.30.40
-  - name: node1
-    box: generic/ubuntu2204
-    memory: 2048
-    cpus: 2
-    groups:
-      - k3s_cluster
-      - node
+  - <<: [*node, *ubuntu]
+    name: node1
     interfaces:
       - network_name: private_network
         ip: 192.168.30.41
-    config_options:
-      # We currently can not use public-key based authentication on Ubuntu 22.04,
-      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
-  - name: node2
-    box: generic/rocky9
-    memory: 2048
-    cpus: 2
-    groups:
-      - k3s_cluster
-      - node
+  - <<: [*node, *rocky]
+    name: node2
     interfaces:
       - network_name: private_network
         ip: 192.168.30.42
 provisioner:
   name: ansible
   playbooks:
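The rewritten scenario leans on YAML anchors plus the merge key: `.platform_presets` holds anchored fragments, and each platform entry pulls them in with `<<: [*control, *ubuntu]` and then adds its own name and interfaces. A minimal sketch of how that merge resolves under a standard YAML 1.1 merge-key implementation such as PyYAML (keys set directly on the entry win, and when two merged fragments define the same key, the fragment listed first wins); the key names below are generic, not the repo's:

presets:
  - &small
    memory: 2048
    cpus: 2
  - &ubuntu
    box: generic/ubuntu2204
nodes:
  - <<: [*small, *ubuntu]
    name: control1
    # resolves to one mapping with name, memory, cpus and box all set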

View File

@@ -3,39 +3,37 @@ dependency:
   name: galaxy
 driver:
   name: vagrant
-platforms:
-  - name: control1
-    box: generic/ubuntu2204
+.platform_presets:
+  - &control
     memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
       - master
-    interfaces:
-      - network_name: private_network
-        ip: fdad:bad:ba55::de:11
-    config_options:
-      # We currently can not use public-key based authentication on Ubuntu 22.04,
-      # see: https://github.com/chef/bento/issues/1405
-      ssh.username: "vagrant"
-      ssh.password: "vagrant"
-  - name: node1
-    box: generic/ubuntu2204
+  - &node
     memory: 2048
     cpus: 2
     groups:
       - k3s_cluster
       - node
-    interfaces:
-      - network_name: private_network
-        ip: fdad:bad:ba55::de:21
+  - &ubuntu
+    box: generic/ubuntu2204
     config_options:
       # We currently can not use public-key based authentication on Ubuntu 22.04,
       # see: https://github.com/chef/bento/issues/1405
       ssh.username: "vagrant"
       ssh.password: "vagrant"
+platforms:
+  - <<: [*control, *ubuntu]
+    name: control1
+    interfaces:
+      - network_name: private_network
+        ip: fdad:bad:ba55::de:11
+  - <<: [*node, *ubuntu]
+    name: node1
+    interfaces:
+      - network_name: private_network
+        ip: fdad:bad:ba55::de:21
 provisioner:
   name: ansible
   playbooks:

View File

@@ -1,3 +0,0 @@
#!/bin/bash
ansible-playbook reboot.yml -i inventory/my-cluster/hosts.ini

View File

@@ -1,9 +0,0 @@
---
- name: Reboot k3s_cluster
hosts: k3s_cluster
gather_facts: yes
become: yes
tasks:
- name: Reboot the nodes (and Wait upto 5 mins max)
reboot:
reboot_timeout: 300

View File

@@ -1,12 +0,0 @@
ansible-core>=2.13.5
ansible-lint>=6.8.6
jmespath>=1.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
molecule-vagrant>=1.0.0
molecule>=4.0.3
netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0
yamllint>=1.28.0

View File

@@ -1,210 +1,72 @@
# ansible-compat==2.2.1
# This file is autogenerated by pip-compile with python 3.8 ansible-core==2.13.5
# To update, run: ansible-lint==6.8.4
#
# pip-compile requirements.in
#
ansible-compat==2.2.4
# via
# ansible-lint
# molecule
ansible-core==2.14.1
# via
# -r requirements.in
# ansible-lint
ansible-lint==6.8.6
# via -r requirements.in
arrow==1.2.3 arrow==1.2.3
# via jinja2-time
attrs==22.1.0 attrs==22.1.0
# via jsonschema
binaryornot==0.4.4 binaryornot==0.4.4
# via cookiecutter
black==22.10.0 black==22.10.0
# via ansible-lint
bracex==2.3.post1 bracex==2.3.post1
# via wcmatch
cachetools==5.2.0 cachetools==5.2.0
# via google-auth Cerberus==1.3.2
certifi==2022.9.24 certifi==2022.9.24
# via
# kubernetes
# requests
cffi==1.15.1 cffi==1.15.1
# via cryptography
cfgv==3.3.1
# via pre-commit
chardet==5.0.0 chardet==5.0.0
# via binaryornot
charset-normalizer==2.1.1 charset-normalizer==2.1.1
# via requests
click==8.1.3 click==8.1.3
# via
# black
# click-help-colors
# cookiecutter
# molecule
click-help-colors==0.9.1 click-help-colors==0.9.1
# via molecule
commonmark==0.9.1 commonmark==0.9.1
# via rich
cookiecutter==2.1.1 cookiecutter==2.1.1
# via molecule cryptography==38.0.1
cryptography==38.0.3
# via ansible-core
distlib==0.3.6
# via virtualenv
distro==1.8.0 distro==1.8.0
# via selinux
enrich==1.2.7 enrich==1.2.7
# via molecule
filelock==3.8.0 filelock==3.8.0
# via google-auth==2.13.0
# ansible-lint
# virtualenv
google-auth==2.14.0
# via kubernetes
identify==2.5.8
# via pre-commit
idna==3.4 idna==3.4
# via requests importlib-resources==5.10.0
jinja2==3.1.2 Jinja2==3.1.2
# via
# ansible-core
# cookiecutter
# jinja2-time
# molecule
# molecule-vagrant
jinja2-time==0.2.0 jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1 jmespath==1.0.1
# via -r requirements.in
jsonpatch==1.32 jsonpatch==1.32
# via -r requirements.in
jsonpointer==2.3 jsonpointer==2.3
# via jsonpatch jsonschema==4.16.0
jsonschema==4.17.0 kubernetes==24.2.0
# via MarkupSafe==2.1.1
# ansible-compat molecule==4.0.1
# ansible-lint
# molecule
kubernetes==25.3.0
# via -r requirements.in
markupsafe==2.1.1
# via jinja2
molecule==4.0.4
# via
# -r requirements.in
# molecule-vagrant
molecule-vagrant==1.0.0 molecule-vagrant==1.0.0
# via -r requirements.in
mypy-extensions==0.4.3 mypy-extensions==0.4.3
# via black
netaddr==0.8.0 netaddr==0.8.0
# via -r requirements.in
nodeenv==1.7.0
# via pre-commit
oauthlib==3.2.2 oauthlib==3.2.2
# via requests-oauthlib
packaging==21.3 packaging==21.3
# via
# ansible-compat
# ansible-core
# ansible-lint
# molecule
pathspec==0.10.1 pathspec==0.10.1
# via pkgutil-resolve-name==1.3.10
# black
# yamllint
platformdirs==2.5.2 platformdirs==2.5.2
# via
# black
# virtualenv
pluggy==1.0.0 pluggy==1.0.0
# via molecule pre-commit==2.20.0
pre-commit==2.21.0
# via -r requirements.in
pre-commit-hooks==4.4.0
# via -r requirements.in
pyasn1==0.4.8 pyasn1==0.4.8
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.2.8 pyasn1-modules==0.2.8
# via google-auth
pycparser==2.21 pycparser==2.21
# via cffi Pygments==2.13.0
pygments==2.13.0
# via rich
pyparsing==3.0.9 pyparsing==3.0.9
# via packaging pyrsistent==0.18.1
pyrsistent==0.19.2
# via jsonschema
python-dateutil==2.8.2 python-dateutil==2.8.2
# via
# arrow
# kubernetes
python-slugify==6.1.2 python-slugify==6.1.2
# via cookiecutter
python-vagrant==1.0.0 python-vagrant==1.0.0
# via molecule-vagrant PyYAML==6.0
pyyaml==6.0
# via
# -r requirements.in
# ansible-compat
# ansible-core
# ansible-lint
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
# yamllint
requests==2.28.1 requests==2.28.1
# via
# cookiecutter
# kubernetes
# requests-oauthlib
requests-oauthlib==1.3.1 requests-oauthlib==1.3.1
# via kubernetes
resolvelib==0.8.1 resolvelib==0.8.1
# via ansible-core
rich==12.6.0 rich==12.6.0
# via
# ansible-lint
# enrich
# molecule
rsa==4.9 rsa==4.9
# via google-auth ruamel.yaml==0.17.21
ruamel-yaml==0.17.21 ruamel.yaml.clib==0.2.7
# via
# ansible-lint
# pre-commit-hooks
selinux==0.2.1 selinux==0.2.1
# via molecule-vagrant
six==1.16.0 six==1.16.0
# via
# google-auth
# kubernetes
# python-dateutil
subprocess-tee==0.3.5 subprocess-tee==0.3.5
# via ansible-compat
text-unidecode==1.3 text-unidecode==1.3
# via python-slugify tomli==2.0.1
typing-extensions==4.4.0
urllib3==1.26.12 urllib3==1.26.12
# via
# kubernetes
# requests
virtualenv==20.16.6
# via pre-commit
wcmatch==8.4.1 wcmatch==8.4.1
# via ansible-lint websocket-client==1.4.1
websocket-client==1.4.2 yamllint==1.28.0
# via kubernetes zipp==3.10.0
yamllint==1.29.0
# via
# -r requirements.in
# ansible-lint
# The following packages are considered to be unsafe in a requirements file:
# setuptools

View File

@@ -13,11 +13,51 @@
   args:
     warn: false # The ansible systemd module does not support reset-failed
-- name: Deploy vip manifest
-  include_tasks: vip.yml
+- name: Create manifests directory on first master
+  file:
+    path: /var/lib/rancher/k3s/server/manifests
+    state: directory
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-- name: Deploy metallb manifest
-  include_tasks: metallb.yml
+- name: Copy vip rbac manifest to first master
+  template:
+    src: "vip.rbac.yaml.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+- name: Copy vip manifest to first master
+  template:
+    src: "vip.yaml.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+# these will be copied and installed now, then tested later and apply config
+- name: Copy metallb namespace to first master
+  template:
+    src: "metallb.namespace.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+- name: Copy metallb namespace to first master
+  template:
+    src: "metallb.crds.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
 - name: Init cluster inside the transient k3s-init service
   command:
@@ -26,6 +66,8 @@
         --unit=k3s-init \
         k3s server {{ server_init_args }}"
     creates: "{{ systemd_dir }}/k3s.service"
+  args:
+    warn: false # The ansible systemd module does not support transient units
 - name: Verification
   block:

View File

@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy metallb namespace to first master
template:
src: "metallb.namespace.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy metallb manifest to first master
template:
src: "metallb.crds.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

View File

@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy vip rbac manifest to first master
template:
src: "vip.rbac.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy vip manifest to first master
template:
src: "vip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

View File

@@ -77,7 +77,7 @@ spec:
 aggregationLength:
   default: 32
   description: The aggregation-length advertisement option lets
-    you "roll up" the /32s into a larger prefix.
+    you roll up the /32s into a larger prefix.
   format: int32
   minimum: 1
   type: integer
@@ -167,7 +167,7 @@
 aggregationLength:
   default: 32
   description: The aggregation-length advertisement option lets
-    you "roll up" the /32s into a larger prefix.
+    you roll up the /32s into a larger prefix.
   format: int32
   minimum: 1
   type: integer
@@ -359,7 +359,7 @@
 aggregationLength:
   default: 32
   description: The aggregation-length advertisement option lets you
-    "roll up" the /32s into a larger prefix. Defaults to 32. Works for
+    roll up the /32s into a larger prefix. Defaults to 32. Works for
     IPv4 addresses.
   format: int32
   minimum: 1
@@ -367,7 +367,7 @@
 aggregationLengthV6:
   default: 128
   description: The aggregation-length advertisement option lets you
-    "roll up" the /128s into a larger prefix. Defaults to 128. Works
+    roll up the /128s into a larger prefix. Defaults to 128. Works
     for IPv6 addresses.
   format: int32
   type: integer

View File

@@ -1,6 +1,92 @@
 ---
-- name: Deploy metallb pool
-  include_tasks: metallb.yml
+- name: Create manifests directory for temp configuration
+  file:
+    path: /tmp/k3s
+    state: directory
+    owner: "{{ ansible_user }}"
+    mode: 0755
+  with_items: "{{ groups['master'] }}"
+  run_once: true
+- name: Copy metallb CRs manifest to first master
+  template:
+    src: "metallb.crs.j2"
+    dest: "/tmp/k3s/metallb-crs.yaml"
+    owner: "{{ ansible_user }}"
+    mode: 0755
+  with_items: "{{ groups['master'] }}"
+  run_once: true
+- name: Test metallb-system namespace
+  command: >-
+    k3s kubectl -n metallb-system
+  changed_when: false
+  with_items: "{{ groups['master'] }}"
+  run_once: true
+- name: Wait for MetalLB resources
+  command: >-
+    k3s kubectl wait {{ item.resource }}
+    --namespace='metallb-system'
+    {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
+    {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
+    {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
+    --timeout='{{ metal_lb_available_timeout }}'
+  changed_when: false
+  run_once: true
+  with_items:
+    - description: controller
+      resource: deployment
+      name: controller
+      condition: --for condition=Available=True
+    - description: webhook service
+      resource: pod
+      selector: component=controller
+      condition: --for=jsonpath='{.status.phase}'=Running
+    - description: pods in replica sets
+      resource: pod
+      selector: component=controller,app=metallb
+      condition: --for condition=Ready
+    - description: ready replicas of controller
+      resource: replicaset
+      selector: component=controller,app=metallb
+      condition: --for=jsonpath='{.status.readyReplicas}'=1
+    - description: fully labeled replicas of controller
+      resource: replicaset
+      selector: component=controller,app=metallb
+      condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
+    - description: available replicas of controller
+      resource: replicaset
+      selector: component=controller,app=metallb
+      condition: --for=jsonpath='{.status.availableReplicas}'=1
+  loop_control:
+    label: "{{ item.description }}"
+- name: Test metallb-system webhook-service endpoint
+  command: >-
+    k3s kubectl -n metallb-system get endpoints webhook-service
+  changed_when: false
+  with_items: "{{ groups['master'] }}"
+  run_once: true
+- name: Apply metallb CRs
+  command: >-
+    k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
+    --timeout='{{ metal_lb_available_timeout }}'
+  register: this
+  changed_when: false
+  run_once: true
+  until: this.rc == 0
+  retries: 5
+- name: Test metallb-system resources
+  command: >-
+    k3s kubectl -n metallb-system get {{ item }}
+  changed_when: false
+  run_once: true
+  with_items:
+    - IPAddressPool
+    - L2Advertisement
 - name: Remove tmp directory used for manifests
   file:
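These tasks render metallb.crs.j2 to /tmp/k3s/metallb-crs.yaml, wait for the MetalLB controller, webhook pod and replica set to be ready, and only then apply the custom resources with retries (MetalLB's validating webhook has to be reachable or the apply can be rejected, hence the until/retries loop). The template itself is not shown in this compare; a hedged sketch of the kind of resources it would render for metal_lb_ip_range, based on the MetalLB v1beta1 CRD API rather than on the repo's actual template (resource names here are placeholders):

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.30.80-192.168.30.90
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2
  namespace: metallb-system
spec:
  ipAddressPools:
    - first-pool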

View File

@@ -1,89 +0,0 @@
---
- name: Create manifests directory for temp configuration
file:
path: /tmp/k3s
state: directory
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for MetalLB resources
command: >-
k3s kubectl wait {{ item.resource }}
--namespace='metallb-system'
{% if item.name | default(False) -%}{{ item.name }}{%- endif %}
{% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
{% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
--timeout='{{ metal_lb_available_timeout }}'
changed_when: false
run_once: true
with_items:
- description: controller
resource: deployment
name: controller
condition: --for condition=Available=True
- description: webhook service
resource: pod
selector: component=controller
condition: --for=jsonpath='{.status.phase}'=Running
- description: pods in replica sets
resource: pod
selector: component=controller,app=metallb
condition: --for condition=Ready
- description: ready replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.readyReplicas}'=1
- description: fully labeled replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
- description: available replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.availableReplicas}'=1
loop_control:
label: "{{ item.description }}"
- name: Test metallb-system webhook-service endpoint
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Apply metallb CRs
command: >-
k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
--timeout='{{ metal_lb_available_timeout }}'
register: this
changed_when: false
run_once: true
until: this.rc == 0
retries: 5
- name: Test metallb-system resources
command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
with_items:
- IPAddressPool
- L2Advertisement

View File

@@ -1,3 +1,3 @@
 ---
-- name: reboot
+- name: Reboot
   reboot:
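Handlers run only when a task notifies them by name, so the rename from reboot to Reboot is visible to any task that triggers it. A minimal sketch of the calling side (the notifying task below is illustrative, not taken from this compare):

- name: Apply a change that requires a restart
  template:
    src: some-config.j2
    dest: /etc/some-config.conf
  notify: Reboot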