Compare commits

..

57 Commits

Author  SHA1  Message  Date
Timothy Stewart  2ae9ee82f0  fix(ci): pin + cache  2022-11-02 21:27:35 -05:00
Timothy Stewart  5380f93b64  fix(ci): pin + cache  2022-11-02 21:20:33 -05:00
Timothy Stewart  a414453ad4  fix(ci): pin + cache  2022-11-02 21:11:31 -05:00
Timothy Stewart  23c5d9ec89  fix(ci): pin + cache  2022-11-02 21:00:37 -05:00
Timothy Stewart  6b339e1985  fix(ci): pin + cache  2022-11-02 20:55:40 -05:00
Timothy Stewart  a031807660  fix(ci): pin + cache  2022-11-02 20:15:03 -05:00
Timothy Stewart  7dd305aabc  fix(ci): pin + cache  2022-11-02 20:00:27 -05:00
Timothy Stewart  500931e2fd  fix(ci): pin + cache  2022-11-02 19:55:06 -05:00
Timothy Stewart  cf357cf164  fix(ci): pin + cache  2022-11-02 19:49:32 -05:00
Timothy Stewart  215e0d10ed  fix(ci): pin + cache  2022-11-02 19:49:03 -05:00
Timothy Stewart  c6ed680dc1  fix(ci): pin + cache  2022-11-02 19:41:30 -05:00
Timothy Stewart  8343a6199e  fix(ci): pin + cache  2022-11-02 19:40:11 -05:00
Timothy Stewart  b524f97552  fix(ci): pin + cache  2022-11-02 19:38:39 -05:00
Timothy Stewart  f741040e44  fix(ci): pin + cache  2022-11-02 19:32:26 -05:00
Timothy Stewart  09bc628ba6  fix(ci): pin + cache  2022-11-01 22:55:42 -05:00
Timothy Stewart  71ff6b86cd  fix(ci): pin + cache  2022-11-01 22:43:56 -05:00
Timothy Stewart  23729ddbbe  fix(ci): pin + cache  2022-11-01 22:35:39 -05:00
Timothy Stewart  e254c407f0  fix(ci): pin + cache  2022-11-01 22:18:39 -05:00
Timothy Stewart  713b4694e1  fix(ci): pin + cache  2022-11-01 22:05:37 -05:00
Timothy Stewart  952d513124  fix(ci): pin + cache  2022-11-01 21:45:47 -05:00
Timothy Stewart  dd1e596332  fix(ci): pin + cache  2022-11-01 21:43:00 -05:00
Timothy Stewart  6af47f96d0  fix(ci): pin + cache  2022-11-01 21:24:03 -05:00
Timothy Stewart  664deec6c3  fix(ci): pin + cache  2022-11-01 21:06:21 -05:00
Timothy Stewart  646459e7f5  fix(ci): pin + cache  2022-11-01 21:05:57 -05:00
Timothy Stewart  64242d9729  fix(ci): pin + cache  2022-11-01 21:05:11 -05:00
Timothy Stewart  f4864ddb64  fix(ci): pin + cache  2022-11-01 21:04:23 -05:00
Timothy Stewart  6a83cde0c6  fix(ci): pin + cache  2022-11-01 21:03:27 -05:00
Timothy Stewart  77ac928c0d  fix(ci): pin + cache  2022-11-01 21:01:47 -05:00
Timothy Stewart  8300a7aaac  fix(ci): pin + cache  2022-11-01 21:01:14 -05:00
Timothy Stewart  bdc6af5f46  fix(ci): pin + cache  2022-11-01 20:47:50 -05:00
Timothy Stewart  dc8276157a  fix(ci): pin + cache  2022-11-01 20:37:23 -05:00
Timothy Stewart  37f0cb11d2  fix(ci): pin + cache  2022-11-01 20:35:46 -05:00
Timothy Stewart  68e7c77b22  fix(ci): pin + cache  2022-11-01 20:26:13 -05:00
Timothy Stewart  d82c4feac8  feat(gh-actions-controller): added  2022-11-01 20:22:07 -05:00
Timothy Stewart  9217d8607b  feat(gh-actions-controller): added  2022-11-01 20:19:00 -05:00
Timothy Stewart  fbc15aa1a1  fix(ci): pin + cache  2022-11-01 20:15:03 -05:00
Timothy Stewart  b55ec046ad  fix(ci): pin + cache  2022-11-01 20:07:15 -05:00
Timothy Stewart  b3cc178045  fix(ci): pin + cache  2022-11-01 19:59:22 -05:00
Timothy Stewart  13be424187  fix(ci): pin + cache  2022-11-01 19:55:33 -05:00
Timothy Stewart  d9cecd5364  fix(ci): pin + cache  2022-11-01 19:51:32 -05:00
Timothy Stewart  afb96dbee2  fix(ci): pin + cache  2022-11-01 19:48:31 -05:00
Timothy Stewart  30ffc69192  fix(ci): pin + cache  2022-11-01 19:41:44 -05:00
Timothy Stewart  94e385c28e  fix(ci): pin + cache  2022-11-01 19:40:28 -05:00
Timothy Stewart  dbb2cda17a  fix(ci): pin + cache  2022-10-31 22:10:31 -05:00
Timothy Stewart  d24cdb97db  feat(gh-actions-controller): added  2022-10-31 22:09:33 -05:00
Timothy Stewart  5bebec930b  feat(gh-actions-controller): added  2022-10-31 22:02:16 -05:00
Timothy Stewart  ac52acdec1  feat(gh-actions-controller): added  2022-10-31 22:01:39 -05:00
Timothy Stewart  105b2c2f1e  fix(ci): pin + cache  2022-10-31 21:55:51 -05:00
Timothy Stewart  d20f485fca  fix(ci): pin + cache  2022-10-31 21:47:33 -05:00
Timothy Stewart  f9bb9dabae  fix(ci): pin + cache  2022-10-31 21:45:11 -05:00
Timothy Stewart  6f15ef260e  fix(ci): pin + cache  2022-10-31 21:40:25 -05:00
Timothy Stewart  de1966fe02  fix(ci): pin + cache  2022-10-31 21:33:47 -05:00
Timothy Stewart  fc823122d8  fix(script): convert to linux  2022-10-31 21:29:24 -05:00
Techno Tim  2f8d94bb5e  Merge branch 'master' into self-hosted-runners  2022-10-31 18:52:22 -05:00
Timothy Stewart  9c3814ce72  feat(gh-actions-controller): added  2022-10-30 22:45:59 -05:00
Timothy Stewart  0e60f4643b  feat(gh-actions-controller): added  2022-10-30 22:44:13 -05:00
Timothy Stewart  bb20514a6a  feat(ci): switching to self-hosted runners  2022-10-30 20:46:14 -05:00
45 changed files with 2138 additions and 786 deletions

View File

@@ -11,5 +11,5 @@
- [ ] Ran `site.yml` playbook
- [ ] Ran `reset.yml` playbook
- [ ] Did not add any unnecessary changes
- [ ] Ran pre-commit install at least once before committing
- [ ] 🚀
- [ ] Ran pre-commit install at least once before committing

View File

@@ -5,10 +5,15 @@
# already present on the system.
set -euo pipefail
YQ_VERSION=v4.29.2
YQ_BINARY=yq_linux_amd64
GIT_ROOT=$(git rev-parse --show-toplevel)
PROVIDER=virtualbox
# get yq used for filtering
sudo wget https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/${YQ_BINARY} -O /usr/bin/yq &&\
sudo chmod +x /usr/bin/yq
# Read all boxes for all platforms from the "molecule.yml" files
all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
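
The hunk cuts off mid-pipeline. For orientation, here is a plausible sketch of how such a script typically finishes: deduplicating the box names and pre-fetching each box with the standard `vagrant box` subcommands. The `sort -u` step and the cache check are illustrative assumptions, not taken from the diff:

# Illustrative continuation -- not part of the diff above
all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
  yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
  sort -u)                   # Deduplicate so each box is fetched only once
for box in ${all_boxes}; do
  # Only download boxes that are not already in the local Vagrant cache
  if ! vagrant box list | grep -q "^${box} "; then
    vagrant box add --provider "${PROVIDER}" "${box}"
  fi
done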

View File

@@ -1,15 +0,0 @@
---
name: "CI"
on:
pull_request:
push:
branches:
- master
paths-ignore:
- '**/README.md'
jobs:
lint:
uses: ./.github/workflows/lint.yml
test:
uses: ./.github/workflows/test.yml
needs: [lint]

View File

@@ -1,22 +1,27 @@
---
name: Linting
on:
workflow_call:
pull_request:
push:
branches:
- master
paths-ignore:
- '**/README.md'
jobs:
pre-commit-ci:
name: Pre-Commit
runs-on: ubuntu-latest
runs-on: self-hosted
env:
PYTHON_VERSION: "3.10"
steps:
- name: Check out the codebase
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # 4.3.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
@@ -56,12 +61,12 @@ jobs:
ensure-pinned-actions:
name: Ensure SHA Pinned Actions
runs-on: ubuntu-latest
runs-on: self-hosted
steps:
- name: Checkout code
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@6ca5574367befbc9efdb2fa25978084159c5902d # 1.3.0
with:
allowlist: |
aws-actions/

View File

@@ -1,11 +1,17 @@
---
name: Test
on:
workflow_call:
pull_request:
push:
branches:
- master
paths-ignore:
- '**/README.md'
jobs:
molecule:
name: Molecule
runs-on: macos-12
runs-on: self-hosted
strategy:
matrix:
scenario:
@@ -15,13 +21,40 @@ jobs:
fail-fast: false
env:
PYTHON_VERSION: "3.10"
VAGRANT_DEFAULT_PROVIDER: virtualbox
steps:
- name: Check out the codebase
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Virtual Box from Oracle
run: |
echo "::group::Virtual Box"
wget -O- https://www.virtualbox.org/download/oracle_vbox_2016.asc | sudo gpg --dearmor --yes --output /usr/share/keyrings/oracle-virtualbox-2016.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/oracle-virtualbox-2016.gpg] https://download.virtualbox.org/virtualbox/debian $(lsb_release -cs) contrib" | sudo tee -a /etc/apt/sources.list.d/virtualbox.list
sudo apt update && sudo apt install -y linux-headers-generic linux-headers-5.15.0-52-generic build-essential dkms virtualbox-dkms virtualbox-6.1
echo "::endgroup::"
echo "::group::Virtual Box Test"
vboxmanage --version
sudo /sbin/vboxconfig
sudo modprobe vboxdrv
vboxmanage --version
echo "::endgroup::"
- name: Install Vagrant
run: |
echo "::group::Install Vagrant"
wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt update && sudo apt install -y vagrant
vagrant version
vagrant plugin list
vagrant plugin install vagrant-vbguest
vagrant plugin list
echo "::endgroup::"
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
@@ -54,7 +87,7 @@ jobs:
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # 4.3.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
@@ -71,7 +104,6 @@ jobs:
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4

2
.gitignore vendored
View File

@@ -1,3 +1 @@
.env/
*.log
ansible.cfg

View File

@@ -1,35 +1,21 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0 # v4.3.0
rev: v4.3.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
- id: detect-private-key
- id: check-merge-conflict
- id: end-of-file-fixer
- id: mixed-line-ending
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git
rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2 # v1.28.0
rev: v1.28.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: a058554b9bcf88f12ad09ab9fb93b267a214368f # v6.8.6
rev: v6.8.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a # v0.8.0.4
rev: v0.8.0.4
hooks:
- id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9 # v1.3.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: 30d9af95631de0d7cff4e282bde9160d38bb0359 # 0.4.0
hooks:
- id: fix-smartquotes

View File

@@ -174,4 +174,4 @@
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
END OF TERMS AND CONDITIONS

View File

@@ -10,7 +10,7 @@ If you want more context on how this works, see:
📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
📺 [Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
## 📖 k3s Ansible Playbook
@@ -28,14 +28,14 @@ on processor architecture:
## ✅ System requirements
- Control Node (the machine you are running `ansible` commands from) must have Ansible 2.11+. If you need a quick primer on Ansible [you can check out my docs on setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
- [`netaddr` package](https://pypi.org/project/netaddr/) must be available to Ansible. If you have installed Ansible via apt, this is already taken care of. If you have installed Ansible via `pip`, make sure to install `netaddr` into the respective virtual environment.
- `server` and `agent` nodes should have passwordless SSH access, if not you can supply arguments to provide credentials `--ask-pass --ask-become-pass` to each command.
- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml`
## 🚀 Getting Started
### 🍴 Preparation
@@ -67,8 +67,6 @@ node
If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).
Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
This requires at least k3s version `1.19.1` however the version is configurable by using the `k3s_version` variable.
If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.
@@ -114,13 +112,9 @@ It is run automatically in CI, but you can also run the tests locally.
This might be helpful for quick feedback in a few cases.
You can find more information about it [here](molecule/README.md).
### Pre-commit Hooks
This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
## Thanks 🤝
This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:
This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and tanks to these repos for code and ideas:
- [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible)
- [geerlingguy/turing-pi-cluster](https://github.com/geerlingguy/turing-pi-cluster)
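
As an aside on the "Pre-commit Hooks" section above: a minimal sketch of the workflow it describes, assuming the repo's `requirements.txt` (which pins `pre-commit`, see its diff below) is used to install the Python tooling:

python3 -m venv .env && source .env/bin/activate  # optional; .env/ is gitignored above
pip install -r requirements.txt                   # installs pre-commit among other tools
pre-commit install                                # registers the git hook once per clone
pre-commit run --all-files                        # lint everything, as the CI job does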

12
ansible.cfg Normal file
View File

@@ -0,0 +1,12 @@
[defaults]
nocows = True
roles_path = ./roles
inventory = ./hosts.ini
remote_tmp = $HOME/.ansible/tmp
local_tmp = $HOME/.ansible/tmp
pipelining = True
become = True
host_key_checking = False
deprecation_warnings = False
callback_whitelist = profile_tasks

View File

@@ -1,2 +0,0 @@
[defaults]
inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file

View File

@@ -1,3 +1,3 @@
#!/bin/bash
ansible-playbook site.yml
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini

View File

@@ -1,5 +1,5 @@
---
k3s_version: v1.24.11+k3s1
k3s_version: v1.24.6+k3s1
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -41,44 +41,11 @@ extra_agent_args: >-
{{ extra_args }}
# image tag for kube-vip
kube_vip_tag_version: "v0.5.11"
# metallb type frr or native
metal_lb_type: "native"
# metallb mode layer2 or bgp
metal_lb_mode: "layer2"
# bgp options
# metal_lb_bgp_my_asn: "64513"
# metal_lb_bgp_peer_asn: "64512"
# metal_lb_bgp_peer_address: "192.168.30.1"
kube_vip_tag_version: "v0.5.5"
# image tag for metal lb
metal_lb_frr_tag_version: "v7.5.1"
metal_lb_speaker_tag_version: "v0.13.9"
metal_lb_controller_tag_version: "v0.13.9"
metal_lb_speaker_tag_version: "v0.13.6"
metal_lb_controller_tag_version: "v0.13.6"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
# in your hosts.ini file.
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade-off being that lxc
# containers are significantly more resource efficient compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low-powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false
# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
# set this value to some-user
proxmox_lxc_ssh_user: root
# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes
proxmox_lxc_ct_ids:
- 200
- 201
- 202
- 203
- 204

View File

@@ -7,11 +7,6 @@
192.168.30.41
192.168.30.42
# only required if proxmox_lxc_configure: true
# must contain all proxmox instances that have a master or worker node
# [proxmox]
# 192.168.30.43
[k3s_cluster:children]
master
node

View File

@@ -3,73 +3,56 @@ dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
.platform_presets:
- &control
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
- &node
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
- &debian
box: generic/debian11
- &rocky
box: generic/rocky9
- &ubuntu
box: generic/ubuntu2204
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
platforms:
- <<: [*control, *ubuntu]
name: control1
interfaces:
- network_name: private_network
ip: 192.168.30.38
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
- name: control2
box: generic/debian11
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
- <<: [*control, *debian]
name: control2
interfaces:
- network_name: private_network
ip: 192.168.30.39
- name: control3
box: generic/rocky9
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
- <<: [*control, *rocky]
name: control3
interfaces:
- network_name: private_network
ip: 192.168.30.40
- name: node1
box: generic/ubuntu2204
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
- <<: [*node, *ubuntu]
name: node1
interfaces:
- network_name: private_network
ip: 192.168.30.41
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
- name: node2
box: generic/rocky9
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
- <<: [*node, *rocky]
name: node2
interfaces:
- network_name: private_network
ip: 192.168.30.42
provisioner:
name: ansible
playbooks:
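
A note on the refactor in this hunk: `.platform_presets` exists only to hold YAML anchors (`&control`, `&node`, `&debian`, ...), and `<<: [*control, *ubuntu]` uses the YAML merge key to fold those preset mappings into each platform entry. A minimal standalone illustration with made-up names:

# How the merge key expands (illustrative, not from the diff)
.presets:
  - &small
    memory: 2048
    cpus: 2
  - &jammy
    box: generic/ubuntu2204
platforms:
  - <<: [*small, *jammy]  # merge both preset mappings into this entry
    name: example1
# ...is equivalent to writing out:
# platforms:
#   - name: example1
#     memory: 2048
#     cpus: 2
#     box: generic/ubuntu2204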

View File

@@ -1,3 +0,0 @@
---
node_ipv4: 192.168.123.12
node_ipv6: fdad:bad:ba55::de:12

View File

@@ -3,54 +3,37 @@ dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
.platform_presets:
- &control
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:11
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
- name: control2
box: generic/ubuntu2204
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:12
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
- name: node1
box: generic/ubuntu2204
- &node
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:21
- &ubuntu
box: generic/ubuntu2204
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
platforms:
- <<: [*control, *ubuntu]
name: control1
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:11
- <<: [*node, *ubuntu]
name: node1
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:21
provisioner:
name: ansible
playbooks:

View File

@@ -7,11 +7,6 @@
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
flannel_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be
# broadcasted on. Since we have assigned a dedicated private network
# here, let's make sure that it is used.
kube_vip_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45

View File

@@ -1,3 +0,0 @@
#!/bin/bash
ansible-playbook reboot.yml

View File

@@ -1,9 +0,0 @@
---
- name: Reboot k3s_cluster
hosts: k3s_cluster
gather_facts: yes
tasks:
- name: Reboot the nodes (and Wait upto 5 mins max)
become: true
reboot:
reboot_timeout: 300

View File

@@ -1,12 +0,0 @@
ansible-core>=2.13.5
ansible-lint>=6.8.6
jmespath>=1.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
molecule-vagrant>=1.0.0
molecule>=4.0.3
netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0
yamllint>=1.28.0

View File

@@ -1,211 +1,72 @@
#
# This file is autogenerated by pip-compile with python 3.8
# To update, run:
#
# pip-compile requirements.in
#
ansible-compat==3.0.1
# via molecule
ansible-core==2.14.3
# via
# -r requirements.in
# ansible-compat
# ansible-lint
ansible-lint==6.14.2
# via -r requirements.in
ansible-compat==2.2.1
ansible-core==2.13.5
ansible-lint==6.8.4
arrow==1.2.3
# via jinja2-time
attrs==22.1.0
# via jsonschema
binaryornot==0.4.4
# via cookiecutter
black==22.10.0
# via ansible-lint
bracex==2.3.post1
# via wcmatch
cachetools==5.2.0
# via google-auth
Cerberus==1.3.2
certifi==2022.9.24
# via
# kubernetes
# requests
cffi==1.15.1
# via cryptography
cfgv==3.3.1
# via pre-commit
chardet==5.0.0
# via binaryornot
charset-normalizer==2.1.1
# via requests
click==8.1.3
# via
# black
# click-help-colors
# cookiecutter
# molecule
click-help-colors==0.9.1
# via molecule
commonmark==0.9.1
# via rich
cookiecutter==2.1.1
# via molecule
cryptography==38.0.3
# via ansible-core
distlib==0.3.6
# via virtualenv
cryptography==38.0.1
distro==1.8.0
# via selinux
enrich==1.2.7
# via molecule
filelock==3.8.0
# via
# ansible-lint
# virtualenv
google-auth==2.14.0
# via kubernetes
identify==2.5.8
# via pre-commit
google-auth==2.13.0
idna==3.4
# via requests
jinja2==3.1.2
# via
# ansible-core
# cookiecutter
# jinja2-time
# molecule
# molecule-vagrant
importlib-resources==5.10.0
Jinja2==3.1.2
jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1
# via -r requirements.in
jsonpatch==1.32
# via -r requirements.in
jsonpointer==2.3
# via jsonpatch
jsonschema==4.17.0
# via
# ansible-compat
# ansible-lint
# molecule
kubernetes==25.3.0
# via -r requirements.in
markupsafe==2.1.1
# via jinja2
molecule==4.0.4
# via
# -r requirements.in
# molecule-vagrant
jsonschema==4.16.0
kubernetes==24.2.0
MarkupSafe==2.1.1
molecule==4.0.1
molecule-vagrant==1.0.0
# via -r requirements.in
mypy-extensions==0.4.3
# via black
netaddr==0.8.0
# via -r requirements.in
nodeenv==1.7.0
# via pre-commit
oauthlib==3.2.2
# via requests-oauthlib
packaging==21.3
# via
# ansible-compat
# ansible-core
# ansible-lint
# molecule
pathspec==0.10.1
# via
# black
# yamllint
pkgutil-resolve-name==1.3.10
platformdirs==2.5.2
# via
# black
# virtualenv
pluggy==1.0.0
# via molecule
pre-commit==2.21.0
# via -r requirements.in
pre-commit-hooks==4.4.0
# via -r requirements.in
pre-commit==2.20.0
pyasn1==0.4.8
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.2.8
# via google-auth
pycparser==2.21
# via cffi
pygments==2.13.0
# via rich
Pygments==2.13.0
pyparsing==3.0.9
# via packaging
pyrsistent==0.19.2
# via jsonschema
pyrsistent==0.18.1
python-dateutil==2.8.2
# via
# arrow
# kubernetes
python-slugify==6.1.2
# via cookiecutter
python-vagrant==1.0.0
# via molecule-vagrant
pyyaml==6.0
# via
# -r requirements.in
# ansible-compat
# ansible-core
# ansible-lint
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
# yamllint
PyYAML==6.0
requests==2.28.1
# via
# cookiecutter
# kubernetes
# requests-oauthlib
requests-oauthlib==1.3.1
# via kubernetes
resolvelib==0.8.1
# via ansible-core
rich==12.6.0
# via
# ansible-lint
# enrich
# molecule
rsa==4.9
# via google-auth
ruamel-yaml==0.17.21
# via
# ansible-lint
# pre-commit-hooks
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.7
selinux==0.2.1
# via molecule-vagrant
six==1.16.0
# via
# google-auth
# kubernetes
# python-dateutil
subprocess-tee==0.4.1
# via
# ansible-compat
# ansible-lint
subprocess-tee==0.3.5
text-unidecode==1.3
# via python-slugify
tomli==2.0.1
typing-extensions==4.4.0
urllib3==1.26.12
# via
# kubernetes
# requests
virtualenv==20.16.6
# via pre-commit
wcmatch==8.4.1
# via ansible-lint
websocket-client==1.4.2
# via kubernetes
yamllint==1.29.0
# via
# -r requirements.in
# ansible-lint
# The following packages are considered to be unsafe in a requirements file:
# setuptools
websocket-client==1.4.1
yamllint==1.28.0
zipp==3.10.0

View File

@@ -1,3 +1,3 @@
#!/bin/bash
ansible-playbook reset.yml
ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini

View File

@@ -2,22 +2,12 @@
- hosts: k3s_cluster
gather_facts: yes
become: yes
roles:
- role: reset
become: true
- role: raspberrypi
become: true
vars: {state: absent}
post_tasks:
- name: Reboot and wait for node to come back up
become: true
reboot:
reboot_timeout: 3600
- hosts: proxmox
gather_facts: true
become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}"
roles:
- role: reset_proxmox_lxc
when: proxmox_lxc_configure

View File

@@ -1,15 +1,11 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null
ansible_user: root
server_init_args: >-
{% if groups['master'] | length > 1 %}
{% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
--server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
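
Worth noting in this hunk: one side pipes the first master's `k3s_node_ip` through `split(",") | first | ansible.utils.ipwrap`. That matters on dual-stack hosts, where `k3s_node_ip` can hold a comma-separated IPv4/IPv6 pair and a bare IPv6 address is not valid inside a URL. A hedged illustration of what the filter chain yields (the address is made up):

# Illustrative only -- shows what the filter chain produces
- debug:
    msg: "{{ 'fdad:bad:ba55::de:11,192.168.30.38' | split(',') | first | ansible.utils.ipwrap }}"
# Prints "[fdad:bad:ba55::de:11]", so the server flag renders as:
#   --server https://[fdad:bad:ba55::de:11]:6443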

View File

@@ -13,11 +13,51 @@
args:
warn: false # The ansible systemd module does not support reset-failed
- name: Deploy vip manifest
include_tasks: vip.yml
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Deploy metallb manifest
include_tasks: metallb.yml
- name: Copy vip rbac manifest to first master
template:
src: "vip.rbac.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy vip manifest to first master
template:
src: "vip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
# these will be copied and installed now, then tested later once the config is applied
- name: Copy metallb namespace to first master
template:
src: "metallb.namespace.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy metallb crds to first master
template:
src: "metallb.crds.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Init cluster inside the transient k3s-init service
command:
@@ -26,6 +66,8 @@
--unit=k3s-init \
k3s server {{ server_init_args }}"
creates: "{{ systemd_dir }}/k3s.service"
args:
warn: false # The ansible systemd module does not support transient units
- name: Verification
block:
@@ -97,24 +139,24 @@
- name: Create directory .kube
file:
path: "{{ ansible_user_dir }}/.kube"
path: ~{{ ansible_user }}/.kube
state: directory
owner: "{{ ansible_user_id }}"
owner: "{{ ansible_user }}"
mode: "u=rwx,g=rx,o="
- name: Copy config file to user home directory
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: "{{ ansible_user_dir }}/.kube/config"
dest: ~{{ ansible_user }}/.kube/config
remote_src: yes
owner: "{{ ansible_user_id }}"
owner: "{{ ansible_user }}"
mode: "u=rw,g=,o="
- name: Configure kubectl cluster to {{ endpoint_url }}
command: >-
k3s kubectl config set-cluster default
--server={{ endpoint_url }}
--kubeconfig {{ ansible_user_dir }}/.kube/config
--kubeconfig ~{{ ansible_user }}/.kube/config
changed_when: true
vars:
endpoint_url: >-

View File

@@ -1,30 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
ansible.builtin.replace:
path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
regexp: "{{ item.change | ansible.builtin.regex_escape }}"
replace: "{{ item.to }}"
with_items:
- change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
loop_control:
label: "{{ item.change }} => {{ item.to }}"
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

View File

@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Download vip rbac manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy vip manifest to first master
template:
src: "vip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

File diff suppressed because it is too large

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
labels:
app: metallb

View File

@@ -0,0 +1,32 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-vip
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: system:kube-vip-role
rules:
- apiGroups: [""]
resources: ["services", "services/status", "nodes", "endpoints"]
verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-vip-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-vip-role
subjects:
- kind: ServiceAccount
name: kube-vip
namespace: kube-system

View File

@@ -30,10 +30,8 @@ spec:
value: "true"
- name: port
value: "6443"
{% if kube_vip_iface %}
- name: vip_interface
value: {{ kube_vip_iface }}
{% endif %}
value: {{ flannel_iface }}
- name: vip_cidr
value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
- name: cp_enable

View File

@@ -1,6 +1,92 @@
---
- name: Deploy metallb pool
include_tasks: metallb.yml
- name: Create manifests directory for temp configuration
file:
path: /tmp/k3s
state: directory
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for MetalLB resources
command: >-
k3s kubectl wait {{ item.resource }}
--namespace='metallb-system'
{% if item.name | default(False) -%}{{ item.name }}{%- endif %}
{% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
{% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
--timeout='{{ metal_lb_available_timeout }}'
changed_when: false
run_once: true
with_items:
- description: controller
resource: deployment
name: controller
condition: --for condition=Available=True
- description: webhook service
resource: pod
selector: component=controller
condition: --for=jsonpath='{.status.phase}'=Running
- description: pods in replica sets
resource: pod
selector: component=controller,app=metallb
condition: --for condition=Ready
- description: ready replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.readyReplicas}'=1
- description: fully labeled replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
- description: available replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.availableReplicas}'=1
loop_control:
label: "{{ item.description }}"
- name: Test metallb-system webhook-service endpoint
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Apply metallb CRs
command: >-
k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
--timeout='{{ metal_lb_available_timeout }}'
register: this
changed_when: false
run_once: true
until: this.rc == 0
retries: 5
- name: Test metallb-system resources
command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
with_items:
- IPAddressPool
- L2Advertisement
- name: Remove tmp directory used for manifests
file:
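
For reference, the "Wait for MetalLB resources" loop above renders one `kubectl wait` invocation per item. The first item, for example, expands to roughly the following (the timeout depends on `metal_lb_available_timeout`, so the value shown is illustrative):

k3s kubectl wait deployment \
  --namespace='metallb-system' \
  controller \
  --for condition=Available=True \
  --timeout='120s'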

View File

@@ -1,101 +0,0 @@
---
- name: Create manifests directory for temp configuration
file:
path: /tmp/k3s
state: directory
owner: "{{ ansible_user_id }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user_id }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for MetalLB resources
command: >-
k3s kubectl wait {{ item.resource }}
--namespace='metallb-system'
{% if item.name | default(False) -%}{{ item.name }}{%- endif %}
{% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
{% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
--timeout='{{ metal_lb_available_timeout }}'
changed_when: false
run_once: true
with_items:
- description: controller
resource: deployment
name: controller
condition: --for condition=Available=True
- description: webhook service
resource: pod
selector: component=controller
condition: --for=jsonpath='{.status.phase}'=Running
- description: pods in replica sets
resource: pod
selector: component=controller,app=metallb
condition: --for condition=Ready
- description: ready replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.readyReplicas}'=1
- description: fully labeled replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
- description: available replicas of controller
resource: replicaset
selector: component=controller,app=metallb
condition: --for=jsonpath='{.status.availableReplicas}'=1
loop_control:
label: "{{ item.description }}"
- name: Test metallb-system webhook-service endpoint
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Apply metallb CRs
command: >-
k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
--timeout='{{ metal_lb_available_timeout }}'
register: this
changed_when: false
run_once: true
until: this.rc == 0
retries: 5
- name: Test metallb-system resources for Layer 2 configuration
command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
when: metal_lb_mode == "layer2"
with_items:
- IPAddressPool
- L2Advertisement
- name: Test metallb-system resources for BGP configuration
command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
when: metal_lb_mode == "bgp"
with_items:
- IPAddressPool
- BGPPeer
- BGPAdvertisement

View File

@@ -13,31 +13,9 @@ spec:
{% for range in metal_lb_ip_range %}
- {{ range }}
{% endfor %}
{% if metal_lb_mode == "layer2" %}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: default
namespace: metallb-system
{% endif %}
{% if metal_lb_mode == "bgp" %}
---
apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
name: default
namespace: metallb-system
spec:
myASN: {{ metal_lb_bgp_my_asn }}
peerASN: {{ metal_lb_bgp_peer_asn }}
peerAddress: {{ metal_lb_bgp_peer_address }}
---
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
name: default
namespace: metallb-system
{% endif %}

View File

@@ -1,4 +0,0 @@
---
- name: reboot server
become: true
reboot:

View File

@@ -1,21 +0,0 @@
---
- name: Check for rc.local file
stat:
path: /etc/rc.local
register: rcfile
- name: Create rc.local if needed
lineinfile:
path: /etc/rc.local
line: "#!/bin/sh -e"
create: true
insertbefore: BOF
mode: "u=rwx,g=rx,o=rx"
when: not rcfile.stat.exists
- name: Write rc.local file
blockinfile:
path: /etc/rc.local
content: "{{ lookup('template', 'templates/rc.local.j2') }}"
state: present
notify: reboot server

View File

@@ -1,5 +0,0 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"

View File

@@ -1,50 +0,0 @@
---
- name: check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.apparmor.profile"
line: "lxc.apparmor.profile: unconfined"
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right cgroup
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cgroup.devices.allow"
line: "lxc.cgroup.devices.allow: a"
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right cap drop
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cap.drop"
line: "lxc.cap.drop: "
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right mounts
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.mount.auto"
line: 'lxc.mount.auto: "proc:rw sys:rw"'
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
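
Taken together, the four `lineinfile` tasks above leave each matching container config with entries like these (a sketch of the resulting `/etc/pve/lxc/<ID>.conf` lines, per the `line:` values in the tasks):

# Resulting entries in /etc/pve/lxc/<ID>.conf
lxc.apparmor.profile: unconfined
lxc.cgroup.devices.allow: a
lxc.cap.drop:             # cleared, so no capabilities are dropped
lxc.mount.auto: "proc:rw sys:rw"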

View File

@@ -1,3 +1,3 @@
---
- name: reboot
- name: Reboot
reboot:

View File

@@ -54,31 +54,3 @@
file:
path: /tmp/k3s
state: absent
- name: Check if rc.local exists
stat:
path: /etc/rc.local
register: rcfile
- name: Remove rc.local modifications for proxmox lxc containers
become: true
blockinfile:
path: /etc/rc.local
content: "{{ lookup('template', 'templates/rc.local.j2') }}"
create: false
state: absent
when: proxmox_lxc_configure and rcfile.stat.exists
- name: Check rc.local for cleanup
become: true
slurp:
src: /etc/rc.local
register: rcslurp
when: proxmox_lxc_configure and rcfile.stat.exists
- name: Cleanup rc.local if we only have a Shebang line
become: true
file:
path: /etc/rc.local
state: absent
when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1

View File

@@ -1,5 +0,0 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"

View File

@@ -1,53 +0,0 @@
---
- name: check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
- name: Remove LXC apparmor profile
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.apparmor.profile"
line: "lxc.apparmor.profile: unconfined"
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc cgroups
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cgroup.devices.allow"
line: "lxc.cgroup.devices.allow: a"
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc cap drop
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cap.drop"
line: "lxc.cap.drop: "
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc mounts
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.mount.auto"
line: 'lxc.mount.auto: "proc:rw sys:rw"'
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers

View File

@@ -1,37 +1,24 @@
---
- hosts: proxmox
gather_facts: true
become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}"
roles:
- role: proxmox_lxc
when: proxmox_lxc_configure
- hosts: k3s_cluster
gather_facts: yes
become: yes
roles:
- role: lxc
become: true
when: proxmox_lxc_configure
- role: prereq
become: true
- role: download
become: true
- role: raspberrypi
become: true
- hosts: master
become: yes
roles:
- role: k3s/master
become: true
- hosts: node
become: yes
roles:
- role: k3s/node
become: true
- hosts: master
become: yes
roles:
- role: k3s/post
become: true

View File

@@ -1,8 +0,0 @@
# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
# see: https://github.com/kubernetes-sigs/kind/issues/662
if [ ! -e /dev/kmsg ]; then
ln -s /dev/console /dev/kmsg
fi
# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c
mount --make-rshared /