Compare commits


57 Commits

Author SHA1 Message Date
Timothy Stewart  2ae9ee82f0  fix(ci): pin + cache  2022-11-02 21:27:35 -05:00
Timothy Stewart  5380f93b64  fix(ci): pin + cache  2022-11-02 21:20:33 -05:00
Timothy Stewart  a414453ad4  fix(ci): pin + cache  2022-11-02 21:11:31 -05:00
Timothy Stewart  23c5d9ec89  fix(ci): pin + cache  2022-11-02 21:00:37 -05:00
Timothy Stewart  6b339e1985  fix(ci): pin + cache  2022-11-02 20:55:40 -05:00
Timothy Stewart  a031807660  fix(ci): pin + cache  2022-11-02 20:15:03 -05:00
Timothy Stewart  7dd305aabc  fix(ci): pin + cache  2022-11-02 20:00:27 -05:00
Timothy Stewart  500931e2fd  fix(ci): pin + cache  2022-11-02 19:55:06 -05:00
Timothy Stewart  cf357cf164  fix(ci): pin + cache  2022-11-02 19:49:32 -05:00
Timothy Stewart  215e0d10ed  fix(ci): pin + cache  2022-11-02 19:49:03 -05:00
Timothy Stewart  c6ed680dc1  fix(ci): pin + cache  2022-11-02 19:41:30 -05:00
Timothy Stewart  8343a6199e  fix(ci): pin + cache  2022-11-02 19:40:11 -05:00
Timothy Stewart  b524f97552  fix(ci): pin + cache  2022-11-02 19:38:39 -05:00
Timothy Stewart  f741040e44  fix(ci): pin + cache  2022-11-02 19:32:26 -05:00
Timothy Stewart  09bc628ba6  fix(ci): pin + cache  2022-11-01 22:55:42 -05:00
Timothy Stewart  71ff6b86cd  fix(ci): pin + cache  2022-11-01 22:43:56 -05:00
Timothy Stewart  23729ddbbe  fix(ci): pin + cache  2022-11-01 22:35:39 -05:00
Timothy Stewart  e254c407f0  fix(ci): pin + cache  2022-11-01 22:18:39 -05:00
Timothy Stewart  713b4694e1  fix(ci): pin + cache  2022-11-01 22:05:37 -05:00
Timothy Stewart  952d513124  fix(ci): pin + cache  2022-11-01 21:45:47 -05:00
Timothy Stewart  dd1e596332  fix(ci): pin + cache  2022-11-01 21:43:00 -05:00
Timothy Stewart  6af47f96d0  fix(ci): pin + cache  2022-11-01 21:24:03 -05:00
Timothy Stewart  664deec6c3  fix(ci): pin + cache  2022-11-01 21:06:21 -05:00
Timothy Stewart  646459e7f5  fix(ci): pin + cache  2022-11-01 21:05:57 -05:00
Timothy Stewart  64242d9729  fix(ci): pin + cache  2022-11-01 21:05:11 -05:00
Timothy Stewart  f4864ddb64  fix(ci): pin + cache  2022-11-01 21:04:23 -05:00
Timothy Stewart  6a83cde0c6  fix(ci): pin + cache  2022-11-01 21:03:27 -05:00
Timothy Stewart  77ac928c0d  fix(ci): pin + cache  2022-11-01 21:01:47 -05:00
Timothy Stewart  8300a7aaac  fix(ci): pin + cache  2022-11-01 21:01:14 -05:00
Timothy Stewart  bdc6af5f46  fix(ci): pin + cache  2022-11-01 20:47:50 -05:00
Timothy Stewart  dc8276157a  fix(ci): pin + cache  2022-11-01 20:37:23 -05:00
Timothy Stewart  37f0cb11d2  fix(ci): pin + cache  2022-11-01 20:35:46 -05:00
Timothy Stewart  68e7c77b22  fix(ci): pin + cache  2022-11-01 20:26:13 -05:00
Timothy Stewart  d82c4feac8  feat(gh-actions-controller): added  2022-11-01 20:22:07 -05:00
Timothy Stewart  9217d8607b  feat(gh-actions-controller): added  2022-11-01 20:19:00 -05:00
Timothy Stewart  fbc15aa1a1  fix(ci): pin + cache  2022-11-01 20:15:03 -05:00
Timothy Stewart  b55ec046ad  fix(ci): pin + cache  2022-11-01 20:07:15 -05:00
Timothy Stewart  b3cc178045  fix(ci): pin + cache  2022-11-01 19:59:22 -05:00
Timothy Stewart  13be424187  fix(ci): pin + cache  2022-11-01 19:55:33 -05:00
Timothy Stewart  d9cecd5364  fix(ci): pin + cache  2022-11-01 19:51:32 -05:00
Timothy Stewart  afb96dbee2  fix(ci): pin + cache  2022-11-01 19:48:31 -05:00
Timothy Stewart  30ffc69192  fix(ci): pin + cache  2022-11-01 19:41:44 -05:00
Timothy Stewart  94e385c28e  fix(ci): pin + cache  2022-11-01 19:40:28 -05:00
Timothy Stewart  dbb2cda17a  fix(ci): pin + cache  2022-10-31 22:10:31 -05:00
Timothy Stewart  d24cdb97db  feat(gh-actions-controller): added  2022-10-31 22:09:33 -05:00
Timothy Stewart  5bebec930b  feat(gh-actions-controller): added  2022-10-31 22:02:16 -05:00
Timothy Stewart  ac52acdec1  feat(gh-actions-controller): added  2022-10-31 22:01:39 -05:00
Timothy Stewart  105b2c2f1e  fix(ci): pin + cache  2022-10-31 21:55:51 -05:00
Timothy Stewart  d20f485fca  fix(ci): pin + cache  2022-10-31 21:47:33 -05:00
Timothy Stewart  f9bb9dabae  fix(ci): pin + cache  2022-10-31 21:45:11 -05:00
Timothy Stewart  6f15ef260e  fix(ci): pin + cache  2022-10-31 21:40:25 -05:00
Timothy Stewart  de1966fe02  fix(ci): pin + cache  2022-10-31 21:33:47 -05:00
Timothy Stewart  fc823122d8  fix(script): convert to linux  2022-10-31 21:29:24 -05:00
Techno Tim  2f8d94bb5e  Merge branch 'master' into self-hosted-runners  2022-10-31 18:52:22 -05:00
Timothy Stewart  9c3814ce72  feat(gh-actions-controller): added  2022-10-30 22:45:59 -05:00
Timothy Stewart  0e60f4643b  feat(gh-actions-controller): added  2022-10-30 22:44:13 -05:00
Timothy Stewart  bb20514a6a  feat(ci): switching to self-hosted runners  2022-10-30 20:46:14 -05:00
77 changed files with 2217 additions and 1141 deletions

View File

@@ -11,5 +11,5 @@
- [ ] Ran `site.yml` playbook
- [ ] Ran `reset.yml` playbook
- [ ] Did not add any unnecessary changes
- [ ] Ran pre-commit install at least once before committing
- [ ] 🚀
- [ ] Ran pre-commit install at least once before committing

View File

@@ -5,10 +5,15 @@
# already present on the system.
set -euo pipefail
YQ_VERSION=v4.29.2
YQ_BINARY=yq_linux_amd64
GIT_ROOT=$(git rev-parse --show-toplevel)
PROVIDER=virtualbox
# get yq used for filtering
sudo wget https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/${YQ_BINARY} -O /usr/bin/yq &&\
sudo chmod +x /usr/bin/yq
# Read all boxes for all platforms from the "molecule.yml" files
all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
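The hunk above cuts off mid-pipeline. As a rough sketch only (assuming the usual shape of such scripts, not the repository's exact code), the filter step typically deduplicates the box names and a loop then pre-downloads anything missing so the Vagrant box cache stays warm:

    # hypothetical continuation of the yq pipeline shown above
    all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
      yq -r '.platforms[].box' |   # read each platform's "box" property
      sort -u)                     # deduplicate across scenarios
    for box in ${all_boxes}; do
      # only download boxes that are not already present locally
      vagrant box list | grep -q "^${box} " ||
        vagrant box add --provider "${PROVIDER}" "${box}"
    done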

View File

@@ -1,21 +0,0 @@
---
name: "CI"
on:
pull_request:
push:
branches:
- master
paths-ignore:
- '**/README.md'
jobs:
lint:
uses: ./.github/workflows/lint.yml
test-default:
uses: ./.github/workflows/test-default.yml
needs: [lint]
test-ipv6:
uses: ./.github/workflows/test-ipv6.yml
needs: [lint, test-default]
test-single-node:
uses: ./.github/workflows/test-single-node.yml
needs: [lint, test-default, test-ipv6]

View File

@@ -1,28 +1,33 @@
---
name: Linting
on:
workflow_call:
pull_request:
push:
branches:
- master
paths-ignore:
- '**/README.md'
jobs:
pre-commit-ci:
name: Pre-Commit
runs-on: ubuntu-22.04
runs-on: self-hosted
env:
PYTHON_VERSION: "3.11"
PYTHON_VERSION: "3.10"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # 4.3.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
@@ -30,7 +35,7 @@ jobs:
${{ runner.os }}-pip-
- name: Cache Ansible
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.ansible/collections
key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
@@ -56,12 +61,12 @@ jobs:
ensure-pinned-actions:
name: Ensure SHA Pinned Actions
runs-on: ubuntu-latest
runs-on: self-hosted
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@f32435541e24cd6a4700a7f52bb2ec59e80603b1 # 2.0.1
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@6ca5574367befbc9efdb2fa25978084159c5902d # 1.3.0
with:
allowlist: |
aws-actions/
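The lint workflow pins every action to a full commit SHA and keeps the human-readable version as a trailing comment, and the ensure-sha-pinned-actions job fails if any `uses:` reference is a mutable tag outside the allowlist. When pinning by hand, the SHA behind a given release tag can be looked up with something like the following (illustrative; for annotated tags the "^{}" line is the commit to pin):

    # resolve a release tag to the commit SHA it points at
    git ls-remote --tags https://github.com/actions/checkout v3.0.2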

View File

@@ -1,80 +0,0 @@
---
name: Molecule Default
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- default
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

View File

@@ -1,80 +0,0 @@
---
name: Molecule IPv6
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- ipv6
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

View File

@@ -1,80 +0,0 @@
---
name: Molecule Single Node
on:
workflow_call:
jobs:
molecule:
name: Molecule
runs-on: macos-13
strategy:
matrix:
scenario:
- single_node
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # 3.3.2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # 4.7.1
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
timeout-minutes: 90
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # 3.1.3
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

.github/workflows/test.yml (new file, 124 lines)
View File

@@ -0,0 +1,124 @@
---
name: Test
on:
pull_request:
push:
branches:
- master
paths-ignore:
- '**/README.md'
jobs:
molecule:
name: Molecule
runs-on: self-hosted
strategy:
matrix:
scenario:
- default
- ipv6
- single_node
fail-fast: false
env:
PYTHON_VERSION: "3.10"
VAGRANT_DEFAULT_PROVIDER: virtualbox
steps:
- name: Check out the codebase
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Virtual Box from Oracle
run: |
echo "::group::Virtual Box"
wget -O- https://www.virtualbox.org/download/oracle_vbox_2016.asc | sudo gpg --dearmor --yes --output /usr/share/keyrings/oracle-virtualbox-2016.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/oracle-virtualbox-2016.gpg] https://download.virtualbox.org/virtualbox/debian $(lsb_release -cs) contrib" | sudo tee -a /etc/apt/sources.list.d/virtualbox.list
sudo apt update && sudo apt install -y linux-headers-generic linux-headers-5.15.0-52-generic build-essential dkms virtualbox-dkms virtualbox-6.1
echo "::endgroup::"
echo "::group::Virtual Box Test"
vboxmanage --version
sudo /sbin/vboxconfig
sudo modprobe vboxdrv
vboxmanage --version
echo "::endgroup::"
- name: Install Vagrant
run: |
echo "::group::Install Vagrant"
wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt update && sudo apt install -y vagrant
vagrant version
vagrant plugin list
vagrant plugin install vagrant-vbguest
vagrant plugin list
echo "::endgroup::"
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
cat <<EOF | sudo tee -a /etc/vbox/networks.conf > /dev/null
* 192.168.30.0/24
* fdad:bad:ba55::/64
EOF
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Vagrant boxes
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@13ae5bb136fac2878aff31522b9efb785519f984 # 4.3.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
- name: Test with molecule
run: molecule test --scenario-name ${{ matrix.scenario }}
env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 60
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
with:
name: logs
path: |
${{ runner.temp }}/logs
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
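The new Test workflow targets a self-hosted Ubuntu runner and installs VirtualBox and Vagrant inside the job itself. Before pointing the workflow at a runner, a quick manual pre-flight along these lines (a sketch; the versions are simply what the workflow above installs) confirms the host can actually run the Molecule/Vagrant scenarios:

    # on the self-hosted runner host (illustrative checks)
    lsmod | grep vboxdrv || echo "vboxdrv kernel module not loaded"
    vboxmanage --version
    vagrant version
    vagrant plugin list        # the workflow installs vagrant-vbguest
    python3 --version          # the workflow pins PYTHON_VERSION to 3.10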

.gitignore (2 lines changed)
View File

@@ -1,3 +1 @@
.env/
*.log
ansible.cfg

View File

@@ -1,35 +1,21 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
rev: v4.3.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
- id: detect-private-key
- id: check-merge-conflict
- id: end-of-file-fixer
- id: mixed-line-ending
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git
rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
rev: v1.28.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
rev: v6.8.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
rev: v0.8.0.4
hooks:
- id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks:
- id: fix-smartquotes
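One side of this hunk references each pre-commit hook by a frozen commit SHA with a `# frozen: vX.Y.Z` comment, the other by plain version tags. The frozen style can be reproduced locally with pre-commit itself; a minimal sketch of the usual workflow (not something this diff adds):

    python3 -m pip install pre-commit    # also pinned in requirements.txt
    pre-commit install                   # the PR checklist asks for this at least once
    pre-commit run --all-files           # run every configured hook
    pre-commit autoupdate --freeze       # rewrite each rev: to a SHA plus a "frozen:" comment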

View File

@@ -174,4 +174,4 @@
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
END OF TERMS AND CONDITIONS

View File

@@ -4,13 +4,13 @@
This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`.
This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.io/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.chipzoller.dev/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
If you want more context on how this works, see:
📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)
📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
📺 [Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
## 📖 k3s Ansible Playbook
@@ -28,14 +28,14 @@ on processor architecture:
## ✅ System requirements
- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://technotim.live/posts/ansible-automation/).
- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
- [`netaddr` package](https://pypi.org/project/netaddr/) must be available to Ansible. If you have installed Ansible via apt, this is already taken care of. If you have installed Ansible via `pip`, make sure to install `netaddr` into the respective virtual environment.
- `server` and `agent` nodes should have passwordless SSH access, if not you can supply arguments to provide credentials `--ask-pass --ask-become-pass` to each command.
- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml`
## 🚀 Getting Started
### 🍴 Preparation
@@ -67,8 +67,6 @@ node
If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).
Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
This requires at least k3s version `1.19.1` however the version is configurable by using the `k3s_version` variable.
If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.
@@ -101,7 +99,7 @@ scp debian@master_ip:~/.kube/config ~/.kube/config
### 🔨 Testing your cluster
See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
### Troubleshooting
@@ -114,13 +112,9 @@ It is run automatically in CI, but you can also run the tests locally.
This might be helpful for quick feedback in a few cases.
You can find more information about it [here](molecule/README.md).
### Pre-commit Hooks
This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
## Thanks 🤝
This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:
This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and tanks to these repos for code and ideas:
- [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible)
- [geerlingguy/turing-pi-cluster](https://github.com/geerlingguy/turing-pi-cluster)

ansible.cfg (new file, 12 lines)
View File

@@ -0,0 +1,12 @@
[defaults]
nocows = True
roles_path = ./roles
inventory = ./hosts.ini
remote_tmp = $HOME/.ansible/tmp
local_tmp = $HOME/.ansible/tmp
pipelining = True
become = True
host_key_checking = False
deprecation_warnings = False
callback_whitelist = profile_tasks

View File

@@ -1,2 +0,0 @@
[defaults]
inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file

View File

@@ -1,3 +1,3 @@
#!/bin/bash
ansible-playbook site.yml
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
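deploy.sh is only a thin wrapper around ansible-playbook. The equivalent direct invocation, including the optional credential prompts the README suggests when passwordless SSH is not configured (flags shown for illustration):

    # one-time: install the collections the playbook depends on (per the README)
    ansible-galaxy collection install -r ./collections/requirements.yml
    ansible-playbook site.yml -i inventory/my-cluster/hosts.ini \
      --ask-pass --ask-become-pass   # only needed without passwordless SSH/sudo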

View File

@@ -1,5 +1,5 @@
---
k3s_version: v1.25.12+k3s1
k3s_version: v1.24.6+k3s1
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -41,89 +41,11 @@ extra_agent_args: >-
{{ extra_args }}
# image tag for kube-vip
kube_vip_tag_version: "v0.5.12"
# metallb type frr or native
metal_lb_type: "native"
# metallb mode layer2 or bgp
metal_lb_mode: "layer2"
# bgp options
# metal_lb_bgp_my_asn: "64513"
# metal_lb_bgp_peer_asn: "64512"
# metal_lb_bgp_peer_address: "192.168.30.1"
kube_vip_tag_version: "v0.5.5"
# image tag for metal lb
metal_lb_speaker_tag_version: "v0.13.9"
metal_lb_controller_tag_version: "v0.13.9"
metal_lb_speaker_tag_version: "v0.13.6"
metal_lb_controller_tag_version: "v0.13.6"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
# in your hosts.ini file.
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
# containers are significantly more resource efficient compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false
# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
# set this value to some-user
proxmox_lxc_ssh_user: root
# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes
proxmox_lxc_ct_ids:
- 200
- 201
- 202
- 203
- 204
# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
# (harbor / nexus / docker's official registry / etc).
# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
# or air-gapped environments where your nodes don't have internet access after the initial setup
# (which is still needed for downloading the k3s binary and such).
# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
custom_registries: false
# The registries can be authenticated or anonymous, depending on your registry server configuration.
# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
# configs:
# "registry.domain.com":
# auth:
# username: yourusername
# password: yourpassword
# The following is an example that pulls all images used in this playbook through your private registries.
# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
# in your deployments.
# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
# you can just remove those from the mirrors: section.
custom_registries_yaml: |
mirrors:
docker.io:
endpoint:
- "https://registry.domain.com/v2/dockerhub"
quay.io:
endpoint:
- "https://registry.domain.com/v2/quayio"
ghcr.io:
endpoint:
- "https://registry.domain.com/v2/ghcrio"
registry.domain.com:
endpoint:
- "https://registry.domain.com"
configs:
"registry.domain.com":
auth:
username: yourusername
password: yourpassword
# Only enable and configure these if you access the internet through a proxy
# proxy_env:
# HTTP_PROXY: "http://proxy.domain.local:3128"
# HTTPS_PROXY: "http://proxy.domain.local:3128"
# NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"

View File

@@ -1,2 +0,0 @@
---
ansible_user: '{{ proxmox_lxc_ssh_user }}'

View File

@@ -7,11 +7,6 @@
192.168.30.41
192.168.30.42
# only required if proxmox_lxc_configure: true
# must contain all proxmox instances that have a master or worker node
# [proxmox]
# 192.168.30.43
[k3s_cluster:children]
master
node

View File

@@ -3,73 +3,56 @@ dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
.platform_presets:
- &control
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
- &node
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
- &debian
box: generic/debian11
- &rocky
box: generic/rocky9
- &ubuntu
box: generic/ubuntu2204
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
platforms:
- <<: [*control, *ubuntu]
name: control1
interfaces:
- network_name: private_network
ip: 192.168.30.38
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
- name: control2
box: generic/debian11
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
- <<: [*control, *debian]
name: control2
interfaces:
- network_name: private_network
ip: 192.168.30.39
- name: control3
box: generic/rocky9
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
- <<: [*control, *rocky]
name: control3
interfaces:
- network_name: private_network
ip: 192.168.30.40
- name: node1
box: generic/ubuntu2204
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
- <<: [*node, *ubuntu]
name: node1
interfaces:
- network_name: private_network
ip: 192.168.30.41
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
- name: node2
box: generic/rocky9
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
- <<: [*node, *rocky]
name: node2
interfaces:
- network_name: private_network
ip: 192.168.30.42
provisioner:
name: ansible
playbooks:

View File

@@ -4,8 +4,7 @@
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:

View File

@@ -1,3 +0,0 @@
---
node_ipv4: 192.168.123.12
node_ipv6: fdad:bad:ba55::de:12

View File

@@ -3,54 +3,37 @@ dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
.platform_presets:
- &control
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:11
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
- name: control2
box: generic/ubuntu2204
memory: 2048
cpus: 2
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:12
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
- name: node1
box: generic/ubuntu2204
- &node
memory: 2048
cpus: 2
groups:
- k3s_cluster
- node
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:21
- &ubuntu
box: generic/ubuntu2204
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
platforms:
- <<: [*control, *ubuntu]
name: control1
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:11
- <<: [*node, *ubuntu]
name: node1
interfaces:
- network_name: private_network
ip: fdad:bad:ba55::de:21
provisioner:
name: ansible
playbooks:

View File

@@ -4,15 +4,9 @@
tasks:
- name: Override host variables (1/2)
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
flannel_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be
# broadcasted on. Since we have assigned a dedicated private network
# here, let's make sure that it is used.
kube_vip_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45

View File

@@ -2,4 +2,4 @@
- name: Verify
hosts: all
roles:
- verify_from_outside
- verify/from_outside

View File

@@ -6,4 +6,4 @@ outside_host: localhost
testing_namespace: molecule-verify-from-outside
# The directory in which the example manifests reside
example_manifests_path: ../../../example
example_manifests_path: ../../../../example

View File

@@ -34,14 +34,14 @@
- name: Assert that the nginx welcome page is available
ansible.builtin.uri:
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
return_content: yes
register: result
failed_when: "'Welcome to nginx!' not in result.content"
vars:
ip: >-
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
port_: >-
port: >-
{{ nginx_services.resources[0].spec.ports[0].port }}
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap

View File

@@ -4,8 +4,7 @@
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:

View File

@@ -1,3 +0,0 @@
#!/bin/bash
ansible-playbook reboot.yml

View File

@@ -1,9 +0,0 @@
---
- name: Reboot k3s_cluster
hosts: k3s_cluster
gather_facts: yes
tasks:
- name: Reboot the nodes (and Wait upto 5 mins max)
become: true
reboot:
reboot_timeout: 300

View File

@@ -1,10 +0,0 @@
ansible-core>=2.13.5
jmespath>=1.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
molecule-vagrant>=1.0.0
molecule>=4.0.3
netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0

View File

@@ -1,178 +1,72 @@
#
# This file is autogenerated by pip-compile with Python 3.11
# by the following command:
#
# pip-compile requirements.in
#
ansible-compat==3.0.1
# via molecule
ansible-core==2.15.4
# via
# -r requirements.in
# ansible-compat
ansible-compat==2.2.1
ansible-core==2.13.5
ansible-lint==6.8.4
arrow==1.2.3
# via jinja2-time
attrs==22.1.0
# via jsonschema
binaryornot==0.4.4
# via cookiecutter
black==22.10.0
bracex==2.3.post1
cachetools==5.2.0
# via google-auth
Cerberus==1.3.2
certifi==2022.9.24
# via
# kubernetes
# requests
cffi==1.15.1
# via cryptography
cfgv==3.3.1
# via pre-commit
chardet==5.0.0
# via binaryornot
charset-normalizer==2.1.1
# via requests
click==8.1.3
# via
# click-help-colors
# cookiecutter
# molecule
click-help-colors==0.9.1
# via molecule
commonmark==0.9.1
# via rich
cookiecutter==2.1.1
# via molecule
cryptography==38.0.3
# via ansible-core
distlib==0.3.6
# via virtualenv
cryptography==38.0.1
distro==1.8.0
# via selinux
enrich==1.2.7
# via molecule
filelock==3.8.0
# via virtualenv
google-auth==2.14.0
# via kubernetes
identify==2.5.8
# via pre-commit
google-auth==2.13.0
idna==3.4
# via requests
jinja2==3.1.2
# via
# ansible-core
# cookiecutter
# jinja2-time
# molecule
# molecule-vagrant
importlib-resources==5.10.0
Jinja2==3.1.2
jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1
# via -r requirements.in
jsonpatch==1.33
# via -r requirements.in
jsonpatch==1.32
jsonpointer==2.3
# via jsonpatch
jsonschema==4.17.0
# via
# ansible-compat
# molecule
kubernetes==25.3.0
# via -r requirements.in
markupsafe==2.1.1
# via jinja2
molecule==4.0.4
# via
# -r requirements.in
# molecule-vagrant
jsonschema==4.16.0
kubernetes==24.2.0
MarkupSafe==2.1.1
molecule==4.0.1
molecule-vagrant==1.0.0
# via -r requirements.in
netaddr==0.9.0
# via -r requirements.in
nodeenv==1.7.0
# via pre-commit
mypy-extensions==0.4.3
netaddr==0.8.0
oauthlib==3.2.2
# via requests-oauthlib
packaging==21.3
# via
# ansible-compat
# ansible-core
# molecule
pathspec==0.10.1
pkgutil-resolve-name==1.3.10
platformdirs==2.5.2
# via virtualenv
pluggy==1.0.0
# via molecule
pre-commit==2.21.0
# via -r requirements.in
pre-commit-hooks==4.5.0
# via -r requirements.in
pre-commit==2.20.0
pyasn1==0.4.8
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.2.8
# via google-auth
pycparser==2.21
# via cffi
pygments==2.13.0
# via rich
Pygments==2.13.0
pyparsing==3.0.9
# via packaging
pyrsistent==0.19.2
# via jsonschema
pyrsistent==0.18.1
python-dateutil==2.8.2
# via
# arrow
# kubernetes
python-slugify==6.1.2
# via cookiecutter
python-vagrant==1.0.0
# via molecule-vagrant
pyyaml==6.0.1
# via
# -r requirements.in
# ansible-compat
# ansible-core
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
PyYAML==6.0
requests==2.28.1
# via
# cookiecutter
# kubernetes
# requests-oauthlib
requests-oauthlib==1.3.1
# via kubernetes
resolvelib==0.8.1
# via ansible-core
rich==12.6.0
# via
# enrich
# molecule
rsa==4.9
# via google-auth
ruamel-yaml==0.17.21
# via pre-commit-hooks
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.7
selinux==0.2.1
# via molecule-vagrant
six==1.16.0
# via
# google-auth
# kubernetes
# python-dateutil
subprocess-tee==0.4.1
# via ansible-compat
subprocess-tee==0.3.5
text-unidecode==1.3
# via python-slugify
tomli==2.0.1
typing-extensions==4.4.0
urllib3==1.26.12
# via
# kubernetes
# requests
virtualenv==20.16.6
# via pre-commit
websocket-client==1.4.2
# via kubernetes
# The following packages are considered to be unsafe in a requirements file:
# setuptools
wcmatch==8.4.1
websocket-client==1.4.1
yamllint==1.28.0
zipp==3.10.0
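One side of this hunk is the pip-compile output generated from requirements.in (shown in a hunk above), complete with `# via` annotations; the other is a flat, hand-maintained pin list. Regenerating the compiled form after touching requirements.in is a short sequence; a sketch, assuming pip-tools is installed into the same virtual environment:

    python3 -m pip install pip-tools
    pip-compile requirements.in            # rewrites requirements.txt with fully pinned versions
    python3 -m pip install -r requirements.txt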

View File

@@ -1,3 +1,3 @@
#!/bin/bash
ansible-playbook reset.yml
ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini

View File

@@ -1,24 +1,13 @@
---
- name: Reset k3s cluster
hosts: k3s_cluster
- hosts: k3s_cluster
gather_facts: yes
become: yes
roles:
- role: reset
become: true
- role: raspberrypi
become: true
vars: {state: absent}
post_tasks:
- name: Reboot and wait for node to come back up
become: true
reboot:
reboot_timeout: 3600
- name: Revert changes to Proxmox cluster
hosts: proxmox
gather_facts: true
become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}"
roles:
- role: reset_proxmox_lxc
when: proxmox_lxc_configure

View File

@@ -0,0 +1,12 @@
---
ansible_user: root
server_init_args: >-
{% if groups['master'] | length > 1 %}
{% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}
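This server_init_args template decides how each control-plane node starts k3s: when the master group has more than one host, the first host gets --cluster-init to bootstrap embedded etcd and every other host joins it over port 6443 with the shared token; with a single master the block collapses to just the extra server args. Rendered out, the commands look roughly like this (IP and token are placeholders):

    # first master (bootstraps the embedded etcd cluster)
    k3s server --cluster-init --token <k3s_token> <extra_server_args>
    # every additional master (joins the first one)
    k3s server --server https://<first-master-ip>:6443 --token <k3s_token> <extra_server_args>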

View File

@@ -1,27 +1,63 @@
---
- name: Stop k3s-init
- name: Clean previous runs of k3s-init
systemd:
name: k3s-init
state: stopped
failed_when: false
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command.
- name: Clean previous runs of k3s-init
command: systemctl reset-failed k3s-init
failed_when: false
changed_when: false
args:
warn: false # The ansible systemd module does not support reset-failed
- name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml
when: proxy_env is defined
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Deploy vip manifest
include_tasks: vip.yml
- name: Copy vip rbac manifest to first master
template:
src: "vip.rbac.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Deploy metallb manifest
include_tasks: metallb.yml
tags: metallb
- name: Copy vip manifest to first master
template:
src: "vip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
# these will be copied and installed now, then tested later and apply config
- name: Copy metallb namespace to first master
template:
src: "metallb.namespace.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy metallb namespace to first master
template:
src: "metallb.crds.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Init cluster inside the transient k3s-init service
command:
@@ -30,15 +66,16 @@
--unit=k3s-init \
k3s server {{ server_init_args }}"
creates: "{{ systemd_dir }}/k3s.service"
args:
warn: false # The ansible systemd module does not support transient units
- name: Verification
when: not ansible_check_mode
block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails)
command:
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
register: nodes
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
retries: "{{ retry_count | default(20) }}"
delay: 10
changed_when: false
@@ -54,6 +91,7 @@
name: k3s-init
state: stopped
failed_when: false
when: not ansible_check_mode
- name: Copy K3s service file
register: k3s_service
@@ -101,24 +139,24 @@
- name: Create directory .kube
file:
path: "{{ ansible_user_dir }}/.kube"
path: ~{{ ansible_user }}/.kube
state: directory
owner: "{{ ansible_user_id }}"
owner: "{{ ansible_user }}"
mode: "u=rwx,g=rx,o="
- name: Copy config file to user home directory
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: "{{ ansible_user_dir }}/.kube/config"
dest: ~{{ ansible_user }}/.kube/config
remote_src: yes
owner: "{{ ansible_user_id }}"
owner: "{{ ansible_user }}"
mode: "u=rw,g=,o="
- name: Configure kubectl cluster to {{ endpoint_url }}
command: >-
k3s kubectl config set-cluster default
--server={{ endpoint_url }}
--kubeconfig {{ ansible_user_dir }}/.kube/config
--kubeconfig ~{{ ansible_user }}/.kube/config
changed_when: true
vars:
endpoint_url: >-
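The hunk above wraps the first start of k3s in a transient k3s-init unit (apparently via systemd-run with --unit=k3s-init, so reruns can stop and reset-failed it cleanly) and then polls kubectl until every master has registered. Outside of Ansible the same sequence is approximately (a sketch, not the literal playbook commands):

    # bootstrap inside a throwaway systemd unit
    systemd-run --unit=k3s-init k3s server --cluster-init --token <k3s_token>
    # poll until all masters appear (the playbook retries until the count matches the group size)
    k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath='{.items[*].metadata.name}'
    # tear the transient unit down once the permanent k3s.service is installed
    systemctl stop k3s-init && systemctl reset-failed k3s-init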

File diff suppressed because it is too large

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
labels:
app: metallb

View File

@@ -0,0 +1,32 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-vip
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: system:kube-vip-role
rules:
- apiGroups: [""]
resources: ["services", "services/status", "nodes", "endpoints"]
verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-vip-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-vip-role
subjects:
- kind: ServiceAccount
name: kube-vip
namespace: kube-system

View File

@@ -30,10 +30,8 @@ spec:
value: "true"
- name: port
value: "6443"
{% if kube_vip_iface %}
- name: vip_interface
value: {{ kube_vip_iface }}
{% endif %}
value: {{ flannel_iface }}
- name: vip_cidr
value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
- name: cp_enable

View File

@@ -1,3 +0,0 @@
---
# Name of the master group
group_name_master: master

View File

@@ -1,9 +1,5 @@
---
- name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml
when: proxy_env is defined
- name: Copy K3s service file
template:
src: "k3s.service.j2"

View File

@@ -7,7 +7,7 @@ After=network-online.target
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead

View File

@@ -1,6 +1,3 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s
# Name of the master group
group_name_master: master

View File

@@ -3,25 +3,25 @@
file:
path: /tmp/k3s
state: directory
owner: "{{ ansible_user_id }}"
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups[group_name_master | default('master')] }}"
with_items: "{{ groups['master'] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user_id }}"
owner: "{{ ansible_user }}"
mode: 0755
with_items: "{{ groups[group_name_master | default('master')] }}"
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups[group_name_master | default('master')] }}"
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for MetalLB resources
@@ -66,7 +66,7 @@
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups[group_name_master | default('master')] }}"
with_items: "{{ groups['master'] }}"
run_once: true
- name: Apply metallb CRs
@@ -79,23 +79,16 @@
until: this.rc == 0
retries: 5
- name: Test metallb-system resources for Layer 2 configuration
- name: Test metallb-system resources
command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
when: metal_lb_mode == "layer2"
with_items:
- IPAddressPool
- L2Advertisement
- name: Test metallb-system resources for BGP configuration
command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
when: metal_lb_mode == "bgp"
with_items:
- IPAddressPool
- BGPPeer
- BGPAdvertisement
- name: Remove tmp directory used for manifests
file:
path: /tmp/k3s
state: absent
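One side of this hunk checks the MetalLB custom resources that match the configured metal_lb_mode (layer2 vs bgp) instead of a single fixed list. The equivalent manual verification against a running cluster, using the resource names from the tasks above, looks roughly like:

    # layer2 mode
    k3s kubectl -n metallb-system get IPAddressPool
    k3s kubectl -n metallb-system get L2Advertisement
    # bgp mode
    k3s kubectl -n metallb-system get BGPPeer
    k3s kubectl -n metallb-system get BGPAdvertisement
    # in either case the webhook endpoint must exist before the CRs are applied
    k3s kubectl -n metallb-system get endpoints webhook-service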

View File

@@ -13,31 +13,9 @@ spec:
{% for range in metal_lb_ip_range %}
- {{ range }}
{% endfor %}
{% if metal_lb_mode == "layer2" %}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: default
namespace: metallb-system
{% endif %}
{% if metal_lb_mode == "bgp" %}
---
apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
name: default
namespace: metallb-system
spec:
myASN: {{ metal_lb_bgp_my_asn }}
peerASN: {{ metal_lb_bgp_peer_asn }}
peerAddress: {{ metal_lb_bgp_peer_address }}
---
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
name: default
namespace: metallb-system
{% endif %}

View File

@@ -1,18 +0,0 @@
---
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
mode: '0755'
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'

View File

@@ -1,4 +0,0 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}

View File

@@ -1,6 +0,0 @@
---
# Indicates whether custom registries for k3s should be configured
# Possible values:
# - present
# - absent
state: present

View File

@@ -1,17 +0,0 @@
---
- name: Create directory /etc/rancher/k3s
file:
path: "/etc/{{ item }}"
state: directory
mode: '0755'
loop:
- rancher
- rancher/k3s
- name: Insert registries into /etc/rancher/k3s/registries.yaml
blockinfile:
path: /etc/rancher/k3s/registries.yaml
block: "{{ custom_registries_yaml }}"
mode: '0600'
create: true

View File

@@ -1,20 +0,0 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null
# Name of the master group
group_name_master: master
# yamllint disable rule:line-length
server_init_args: >-
{% if groups[group_name_master | default('master')] | length > 1 %}
{% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}

View File

@@ -1,18 +0,0 @@
---
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
mode: '0755'
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'

View File

@@ -1,30 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
ansible.builtin.replace:
path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
regexp: "{{ item.change | ansible.builtin.regex_escape }}"
replace: "{{ item.to }}"
with_items:
- change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
loop_control:
label: "{{ item.change }} => {{ item.to }}"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

View File

@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip rbac manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy vip manifest to first master
template:
src: "vip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

View File

@@ -1,4 +0,0 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}

View File

@@ -1,9 +0,0 @@
---
- name: Deploy metallb pool
include_tasks: metallb.yml
tags: metallb
- name: Remove tmp directory used for manifests
file:
path: /tmp/k3s
state: absent

View File

@@ -1,5 +0,0 @@
---
- name: Reboot server
become: true
reboot:
listen: reboot server

View File

@@ -1,21 +0,0 @@
---
- name: Check for rc.local file
stat:
path: /etc/rc.local
register: rcfile
- name: Create rc.local if needed
lineinfile:
path: /etc/rc.local
line: "#!/bin/sh -e"
create: true
insertbefore: BOF
mode: "u=rwx,g=rx,o=rx"
when: not rcfile.stat.exists
- name: Write rc.local file
blockinfile:
path: /etc/rc.local
content: "{{ lookup('template', 'templates/rc.local.j2') }}"
state: present
notify: reboot server

View File

@@ -1,4 +0,0 @@
---
secure_path:
RedHat: '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
Suse: '/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin'

View File

@@ -1,37 +1,34 @@
---
- name: Set same timezone on every Server
community.general.timezone:
timezone:
name: "{{ system_timezone }}"
when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
- name: Set SELinux to disabled state
ansible.posix.selinux:
selinux:
state: disabled
when: ansible_os_family == "RedHat"
- name: Enable IPv4 forwarding
ansible.posix.sysctl:
sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
tags: sysctl
- name: Enable IPv6 forwarding
ansible.posix.sysctl:
sysctl:
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
tags: sysctl
- name: Enable IPv6 router advertisements
ansible.posix.sysctl:
sysctl:
name: net.ipv6.conf.all.accept_ra
value: "2"
state: present
reload: yes
tags: sysctl
- name: Add br_netfilter to /etc/modules-load.d/
copy:
@@ -41,13 +38,13 @@
when: ansible_os_family == "RedHat"
- name: Load br_netfilter
community.general.modprobe:
modprobe:
name: br_netfilter
state: present
when: ansible_os_family == "RedHat"
- name: Set bridge-nf-call-iptables (just to be sure)
ansible.posix.sysctl:
sysctl:
name: "{{ item }}"
value: "1"
state: present
@@ -56,14 +53,13 @@
loop:
- net.bridge.bridge-nf-call-iptables
- net.bridge.bridge-nf-call-ip6tables
tags: sysctl
- name: Add /usr/local/bin to sudo secure_path
lineinfile:
line: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'
line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
regexp: "Defaults(\\s)*secure_path(\\s)*="
state: present
insertafter: EOF
path: /etc/sudoers
validate: 'visudo -cf %s'
when: ansible_os_family in [ "RedHat", "Suse" ]
when: ansible_os_family == "RedHat"

View File

@@ -1,13 +0,0 @@
---
- name: Reboot containers
block:
- name: Get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids: >-
{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
listen: reboot containers
- name: Reboot container
command: "pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"
changed_when: true
listen: reboot containers

View File

@@ -1,44 +0,0 @@
---
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.apparmor.profile"
line: "lxc.apparmor.profile: unconfined"
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right cgroup
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cgroup.devices.allow"
line: "lxc.cgroup.devices.allow: a"
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right cap drop
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cap.drop"
line: "lxc.cap.drop: "
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right mounts
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.mount.auto"
line: 'lxc.mount.auto: "proc:rw sys:rw"'
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers

View File

@@ -1,4 +1,3 @@
---
- name: Reboot
reboot:
listen: reboot

View File

@@ -47,16 +47,20 @@
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
- name: execute OS related tasks on the Raspberry Pi - {{ action }}
include_tasks: "{{ item }}"
with_first_found:
- "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action_ }}/{{ detected_distribution }}.yml"
- "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ action_ }}/{{ ansible_distribution }}.yml"
- "{{ action_ }}/default.yml"
- "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action }}/{{ detected_distribution }}.yml"
- "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ action }}/{{ ansible_distribution }}.yml"
- "{{ action }}/default.yml"
vars:
action_: >-
{% if state == "present" %}setup{% else %}teardown{% endif %}
action: >-
{% if state == "present" -%}
setup
{%- else -%}
teardown
{%- endif %}
when:
- raspberry_pi|default(false)

View File

@@ -8,22 +8,20 @@
notify: reboot
- name: Install iptables
apt:
name: iptables
state: present
apt: name=iptables state=present
- name: Flush iptables before changing to iptables-legacy
iptables:
flush: true
- name: Changing to iptables-legacy
community.general.alternatives:
alternatives:
path: /usr/sbin/iptables-legacy
name: iptables
register: ip4_legacy
- name: Changing to ip6tables-legacy
community.general.alternatives:
alternatives:
path: /usr/sbin/ip6tables-legacy
name: ip6tables
register: ip6_legacy

View File

@@ -46,15 +46,6 @@
- /var/lib/rancher/
- /var/lib/cni/
- name: Remove K3s http_proxy files
file:
name: "{{ item }}"
state: absent
with_items:
- "{{ systemd_dir }}/k3s.service.d"
- "{{ systemd_dir }}/k3s-node.service.d"
when: proxy_env is defined
- name: Reload daemon_reload
systemd:
daemon_reload: yes
@@ -63,31 +54,3 @@
file:
path: /tmp/k3s
state: absent
- name: Check if rc.local exists
stat:
path: /etc/rc.local
register: rcfile
- name: Remove rc.local modifications for proxmox lxc containers
become: true
blockinfile:
path: /etc/rc.local
content: "{{ lookup('template', 'templates/rc.local.j2') }}"
create: false
state: absent
when: proxmox_lxc_configure and rcfile.stat.exists
- name: Check rc.local for cleanup
become: true
slurp:
src: /etc/rc.local
register: rcslurp
when: proxmox_lxc_configure and rcfile.stat.exists
- name: Cleanup rc.local if we only have a Shebang line
become: true
file:
path: /etc/rc.local
state: absent
when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1

View File

@@ -9,7 +9,7 @@
check_mode: false
- name: Umount filesystem
ansible.posix.mount:
mount:
path: "{{ item }}"
state: unmounted
with_items:

View File

@@ -1 +0,0 @@
../../proxmox_lxc/handlers/main.yml

View File

@@ -1,47 +0,0 @@
---
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
- name: Remove LXC apparmor profile
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.apparmor.profile"
line: "lxc.apparmor.profile: unconfined"
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc cgroups
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cgroup.devices.allow"
line: "lxc.cgroup.devices.allow: a"
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc cap drop
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cap.drop"
line: "lxc.cap.drop: "
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc mounts
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.mount.auto"
line: 'lxc.mount.auto: "proc:rw sys:rw"'
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers

View File

@@ -1,48 +1,24 @@
---
- name: Prepare Proxmox cluster
hosts: proxmox
gather_facts: true
become: yes
environment: "{{ proxy_env | default({}) }}"
roles:
- role: proxmox_lxc
when: proxmox_lxc_configure
- name: Prepare k3s nodes
hosts: k3s_cluster
- hosts: k3s_cluster
gather_facts: yes
environment: "{{ proxy_env | default({}) }}"
become: yes
roles:
- role: lxc
become: true
when: proxmox_lxc_configure
- role: prereq
become: true
- role: download
become: true
- role: raspberrypi
become: true
- role: k3s_custom_registries
become: true
when: custom_registries
- name: Setup k3s servers
hosts: master
environment: "{{ proxy_env | default({}) }}"
- hosts: master
become: yes
roles:
- role: k3s_server
become: true
- role: k3s/master
- name: Setup k3s agents
hosts: node
environment: "{{ proxy_env | default({}) }}"
- hosts: node
become: yes
roles:
- role: k3s_agent
become: true
- role: k3s/node
- name: Configure k3s cluster
hosts: master
environment: "{{ proxy_env | default({}) }}"
- hosts: master
become: yes
roles:
- role: k3s_server_post
become: true
- role: k3s/post

View File

@@ -1,8 +0,0 @@
# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
# see: https://github.com/kubernetes-sigs/kind/issues/662
if [ ! -e /dev/kmsg ]; then
ln -s /dev/console /dev/kmsg
fi
# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c
mount --make-rshared /