Compare commits


4 Commits

Author        SHA1         Message                                        Date
Ethan Shold   2cd03f38f2   Add calico-apiserver check                     2024-01-17 10:14:04 -06:00
sholdee       8e1265fbae   Merge branch 'techno-tim:master' into calico   2024-01-17 09:46:24 -06:00
sholdee       f6ee0c72ef   Merge branch 'techno-tim:master' into calico   2024-01-14 01:40:08 -06:00
Ethan Shold   e7ba494a00   Add Tigera Operator/Calico CNI option          2024-01-14 01:31:42 -06:00

    Small tweak to reduce delta from head
    Set calico option to be disabled by default
    Add rescue blocks in case updating existing
    Refactor items and update comments
    Refactor and consolidate calico.yml into block
    Refactor to use template for Calico CRs
    Revert use_calico to false
    Template blockSize
    Align default cidr in template with all.yml sample
    Apply upstream version tags
    Revert to current ver tags. Upstream's don't work.
    Update template address detection
    Add Tigera Operator/Calico CNI option
53 changed files with 247 additions and 1147 deletions


@@ -13,9 +13,5 @@ exclude_paths:
- 'molecule/**/prepare.yml' - 'molecule/**/prepare.yml'
- 'molecule/**/reset.yml' - 'molecule/**/reset.yml'
# The file was generated by galaxy ansible - don't mess with it.
- 'galaxy.yml'
skip_list: skip_list:
- 'fqcn-builtins' - 'fqcn-builtins'
- var-naming[no-role-prefix]


@@ -35,12 +35,7 @@ k3s_version: ""
ansible_user: NA ansible_user: NA
systemd_dir: "" systemd_dir: ""
flannel_iface: "" container_iface: ""
#calico_iface: ""
calico_ebpf: ""
calico_cidr: ""
calico_tag: ""
apiserver_endpoint: "" apiserver_endpoint: ""
@@ -51,9 +46,6 @@ extra_agent_args: ""
kube_vip_tag_version: "" kube_vip_tag_version: ""
kube_vip_cloud_provider_tag_version: ""
kube_vip_lb_ip_range: ""
metal_lb_speaker_tag_version: "" metal_lb_speaker_tag_version: ""
metal_lb_controller_tag_version: "" metal_lb_controller_tag_version: ""


@@ -9,18 +9,3 @@ updates:
ignore: ignore:
- dependency-name: "*" - dependency-name: "*"
update-types: ["version-update:semver-major"] update-types: ["version-update:semver-major"]
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]


@@ -1,42 +0,0 @@
---
name: "Cache"
on:
workflow_call:
jobs:
molecule:
name: cache
runs-on: self-hosted
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
- name: Cache Vagrant boxes
id: cache-vagrant
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
with:
lookup-only: true #if it exists, we don't need to restore and can skip the next step
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
if: steps.cache-vagrant.outputs.cache-hit != 'true' # only run if false since this is just a cache step
run: |
./.github/download-boxes.sh
vagrant box list


@@ -2,26 +2,14 @@
name: "CI" name: "CI"
on: on:
pull_request: pull_request:
types: push:
- opened branches:
- synchronize - master
paths-ignore: paths-ignore:
- '**/.gitignore' - '**/README.md'
- '**/FUNDING.yml'
- '**/host.ini'
- '**/*.md'
- '**/.editorconfig'
- '**/ansible.example.cfg'
- '**/deploy.sh'
- '**/LICENSE'
- '**/reboot.sh'
- '**/reset.sh'
jobs: jobs:
pre:
uses: ./.github/workflows/cache.yml
lint: lint:
uses: ./.github/workflows/lint.yml uses: ./.github/workflows/lint.yml
needs: [pre]
test: test:
uses: ./.github/workflows/test.yml uses: ./.github/workflows/test.yml
needs: [pre, lint] needs: [lint]


@@ -5,27 +5,37 @@ on:
jobs: jobs:
pre-commit-ci: pre-commit-ci:
name: Pre-Commit name: Pre-Commit
runs-on: self-hosted runs-on: ubuntu-latest
env: env:
PYTHON_VERSION: "3.11" PYTHON_VERSION: "3.11"
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2 uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }} - name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0 uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
- name: Restore Ansible cache - name: Cache pip
uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0 uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Ansible
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with: with:
path: ~/.ansible/collections path: ~/.ansible/collections
key: ansible-${{ hashFiles('collections/requirements.yml') }} key: ${{ runner.os }}-ansible-${{ hashFiles('collections/requirements.txt') }}
restore-keys: |
${{ runner.os }}-ansible-
- name: Install dependencies - name: Install dependencies
run: | run: |
@@ -37,17 +47,21 @@ jobs:
python3 -m pip install -r requirements.txt python3 -m pip install -r requirements.txt
echo "::endgroup::" echo "::endgroup::"
echo "::group::Install Ansible role requirements from collections/requirements.yml"
ansible-galaxy install -r collections/requirements.yml
echo "::endgroup::"
- name: Run pre-commit - name: Run pre-commit
uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # 3.0.1 uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
ensure-pinned-actions: ensure-pinned-actions:
name: Ensure SHA Pinned Actions name: Ensure SHA Pinned Actions
runs-on: self-hosted runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2 uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
- name: Ensure SHA pinned actions - name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3 uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
with: with:
allowlist: | allowlist: |
aws-actions/ aws-actions/


@@ -5,51 +5,23 @@ on:
jobs: jobs:
molecule: molecule:
name: Molecule name: Molecule
runs-on: self-hosted runs-on: macos-12
strategy: strategy:
matrix: matrix:
scenario: scenario:
- default - default
# - ipv6 - ipv6
- single_node - single_node
- calico
- cilium
- kube-vip
fail-fast: false fail-fast: false
env: env:
PYTHON_VERSION: "3.11" PYTHON_VERSION: "3.11"
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2 uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf || true
- name: Configure VirtualBox - name: Configure VirtualBox
run: |- run: |-
sudo mkdir -p /etc/vbox sudo mkdir -p /etc/vbox
@@ -58,19 +30,35 @@ jobs:
* fdad:bad:ba55::/64 * fdad:bad:ba55::/64
EOF EOF
- name: Cache pip
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('./requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Cache Vagrant boxes
uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # 3.0.11
with:
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
restore-keys: |
vagrant-boxes
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be
# the same across all scenarios. This step ensures that.
run: ./.github/download-boxes.sh
- name: Set up Python ${{ env.PYTHON_VERSION }} - name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0 uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
- name: Restore vagrant Boxes cache
uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
with:
path: ~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
fail-on-cache-miss: true
- name: Install dependencies - name: Install dependencies
run: | run: |
echo "::group::Upgrade pip" echo "::group::Upgrade pip"
@@ -87,40 +75,18 @@ jobs:
env: env:
ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }} ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
ANSIBLE_SSH_RETRIES: 4 ANSIBLE_SSH_RETRIES: 4
ANSIBLE_TIMEOUT: 120 ANSIBLE_TIMEOUT: 60
PY_COLORS: 1 PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1 ANSIBLE_FORCE_COLOR: 1
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf || true
- name: Upload log files - name: Upload log files
if: always() # do this even if a step before has failed if: always() # do this even if a step before has failed
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # 4.3.1 uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
with: with:
name: logs name: logs
path: | path: |
${{ runner.temp }}/logs ${{ runner.temp }}/logs
overwrite: true
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

.gitignore

@@ -1,4 +1,3 @@
.env/ .env/
*.log *.log
ansible.cfg ansible.cfg
kubeconfig


@@ -1,7 +1,7 @@
--- ---
repos: repos:
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0 rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
hooks: hooks:
- id: requirements-txt-fixer - id: requirements-txt-fixer
- id: sort-simple-yaml - id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
- id: trailing-whitespace - id: trailing-whitespace
args: [--markdown-linebreak-ext=md] args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git - repo: https://github.com/adrienverge/yamllint.git
rev: v1.33.0 rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
hooks: hooks:
- id: yamllint - id: yamllint
args: [-c=.yamllint] args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git - repo: https://github.com/ansible-community/ansible-lint.git
rev: v6.22.2 rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
hooks: hooks:
- id: ansible-lint - id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py - repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.6 rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
hooks: hooks:
- id: shellcheck - id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks - repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.4 rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks: hooks:
- id: remove-crlf - id: remove-crlf
- id: remove-tabs - id: remove-tabs
- repo: https://github.com/sirosen/texthooks - repo: https://github.com/sirosen/texthooks
rev: 0.6.4 rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks: hooks:
- id: fix-smartquotes - id: fix-smartquotes


@@ -7,5 +7,3 @@ rules:
level: warning level: warning
truthy: truthy:
allowed-values: ['true', 'false'] allowed-values: ['true', 'false']
ignore:
- galaxy.yml


@@ -96,22 +96,8 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
To copy your `kube config` locally so that you can access your **Kubernetes** cluster run: To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:
```bash ```bash
scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config scp debian@master_ip:~/.kube/config ~/.kube/config
``` ```
If you get file Permission denied, go into the node and temporarly run:
```bash
sudo chmod 777 /etc/rancher/k3s/k3s.yaml
```
Then copy with the scp command and reset the permissions back to:
```bash
sudo chmod 600 /etc/rancher/k3s/k3s.yaml
```
You'll then want to modify the config to point to master IP by running:
```bash
sudo nano ~/.kube/config
```
Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
### 🔨 Testing your cluster ### 🔨 Testing your cluster
@@ -132,28 +118,6 @@ You can find more information about it [here](molecule/README.md).
This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/) This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
## 🌌 Ansible Galaxy
This collection can now be used in larger ansible projects.
Instructions:
- create or modify a file `collections/requirements.yml` in your project
```yml
collections:
- name: ansible.utils
- name: community.general
- name: ansible.posix
- name: kubernetes.core
- name: https://github.com/techno-tim/k3s-ansible.git
type: git
version: master
```
- install via `ansible-galaxy collection install -r ./collections/requirements.yml`
- every role is now available via the prefix `techno_tim.k3s_ansible.` e.g. `techno_tim.k3s_ansible.lxc`
## Thanks 🤝 ## Thanks 🤝
This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas: This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:
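For reference, the Ansible Galaxy usage removed in this hunk boils down to installing the collection from `collections/requirements.yml` and then addressing roles through the `techno_tim.k3s_ansible.` prefix. A minimal playbook sketch (the host group and `become` setting are illustrative; the role name is the `lxc` example from the removed text):

```yml
# site.yml (hypothetical)
- hosts: proxmox
  become: true
  roles:
    - role: techno_tim.k3s_ansible.lxc
```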


@@ -1,81 +0,0 @@
### REQUIRED
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
# underscores or numbers and cannot contain consecutive underscores
namespace: techno_tim
# The name of the collection. Has the same character restrictions as 'namespace'
name: k3s_ansible
# The version of the collection. Must be compatible with semantic versioning
version: 1.0.0
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
# @nicks:irc/im.site#channel'
authors:
- your name <example@domain.com>
### OPTIONAL but strongly recommended
# A short summary description of the collection
description: >
The easiest way to bootstrap a self-hosted High Availability Kubernetes
cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB,
and more.
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
license:
- Apache-2.0
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
# requirements as 'namespace' and 'name'
tags:
- etcd
- high-availability
- k8s
- k3s
- k3s-cluster
- kube-vip
- kubernetes
- metallb
- rancher
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies:
ansible.utils: '*'
ansible.posix: '*'
community.general: '*'
kubernetes.core: '*'
# The URL of the originating SCM repository
repository: https://github.com/techno-tim/k3s-ansible
# The URL to any online docs
documentation: https://github.com/techno-tim/k3s-ansible
# The URL to the homepage of the collection/project
homepage: https://www.youtube.com/watch?v=CbkEWcUZ7zM
# The URL to the collection issue tracker
issues: https://github.com/techno-tim/k3s-ansible/issues
# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
# and '.git' are always filtered. Mutually exclusive with 'manifest'
build_ignore: []
# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
# list of MANIFEST.in style
# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
# with 'build_ignore'
# manifest: null


@@ -1,5 +1,5 @@
--- ---
k3s_version: v1.29.2+k3s1 k3s_version: v1.25.16+k3s4
# this is the user that has ssh access to these machines # this is the user that has ssh access to these machines
ansible_user: ansibleuser ansible_user: ansibleuser
systemd_dir: /etc/systemd/system systemd_dir: /etc/systemd/system
@@ -7,32 +7,14 @@ systemd_dir: /etc/systemd/system
# Set your timezone # Set your timezone
system_timezone: "Your/Timezone" system_timezone: "Your/Timezone"
# interface which will be used for flannel # node interface which will be used for the container network interface (flannel or calico)
flannel_iface: "eth0" container_iface: "eth0"
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about # set use_calico to true to use tigera operator/calico instead of the default CNI flannel
# calico_iface: "eth0" # install reference: https://docs.tigera.io/calico/latest/getting-started/kubernetes/k3s/multi-node-install#install-calico
calico_ebpf: false # use eBPF dataplane instead of iptables use_calico: false
calico_tag: "v3.27.2" # calico version tag calico_cidr: "10.52.0.0/16" # pod cidr pool
calico_tag: "v3.27.0" # calico version tag
# uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0"
cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.15.2" # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui
# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: "10.52.0.0/16"
# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false
# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
cilium_bgp_my_asn: "64513"
cilium_bgp_peer_asn: "64512"
cilium_bgp_peer_address: "192.168.30.1"
cilium_bgp_lb_cidr: "192.168.31.0/24" # cidr for cilium loadbalancer ipam
# apiserver_endpoint is virtual ip-address which will be configured on each master # apiserver_endpoint is virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222" apiserver_endpoint: "192.168.30.222"
@@ -44,25 +26,25 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
# The IP on which the node is reachable in the cluster. # The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override # Here, a sensible default is provided, you can still override
# it for each of your hosts, though. # it for each of your hosts, though.
k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}" k3s_node_ip: '{{ ansible_facts[container_iface]["ipv4"]["address"] }}'
# Disable the taint manually by setting: k3s_master_taint = false # Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}" k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
# these arguments are recommended for servers as well as agents: # these arguments are recommended for servers as well as agents:
extra_args: >- extra_args: >-
{{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }} {{ '--flannel-iface=' + container_iface if not use_calico else '' }}
--node-ip={{ k3s_node_ip }} --node-ip={{ k3s_node_ip }}
# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }} # change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
# the contents of the if block is also required if using calico or cilium # the contents of the if block is also required if using calico
extra_server_args: >- extra_server_args: >-
{{ extra_args }} {{ extra_args }}
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }} {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
{% if calico_iface is defined or cilium_iface is defined %} {% if use_calico %}
--flannel-backend=none --flannel-backend=none
--disable-network-policy --disable-network-policy
--cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }} --cluster-cidr={{ calico_cidr }}
{% endif %} {% endif %}
--tls-san {{ apiserver_endpoint }} --tls-san {{ apiserver_endpoint }}
--disable servicelb --disable servicelb
@@ -72,14 +54,7 @@ extra_agent_args: >-
{{ extra_args }} {{ extra_args }}
# image tag for kube-vip # image tag for kube-vip
kube_vip_tag_version: "v0.7.2" kube_vip_tag_version: "v0.5.12"
# tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main"
# kube-vip ip range for load balancer
# (uncomment to use kube-vip for services instead of MetalLB)
# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
# metallb type frr or native # metallb type frr or native
metal_lb_type: "native" metal_lb_type: "native"
@@ -93,8 +68,8 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1" # metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb # image tag for metal lb
metal_lb_speaker_tag_version: "v0.14.3" metal_lb_speaker_tag_version: "v0.13.9"
metal_lb_controller_tag_version: "v0.14.3" metal_lb_controller_tag_version: "v0.13.9"
# metallb ip range for load balancer # metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90" metal_lb_ip_range: "192.168.30.80-192.168.30.90"
@@ -104,9 +79,9 @@ metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this. # Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true. # Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade off being that lxc # Please note this script disables most of the security of lxc containers, with the trade off being that lxc
# containers are significantly more resource efficient compared to full VMs. # containers are significantly more resource efficent compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this. # Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of # I would only really recommend using this if you have partiularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources. # VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false proxmox_lxc_configure: false
# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host, # the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
@@ -160,10 +135,6 @@ custom_registries_yaml: |
username: yourusername username: yourusername
password: yourpassword password: yourpassword
# On some distros like Diet Pi, there is no dbus installed. dbus required by the default reboot command.
# Uncomment if you need a custom reboot command
# custom_reboot_command: /usr/sbin/shutdown -r now
# Only enable and configure these if you access the internet through a proxy # Only enable and configure these if you access the internet through a proxy
# proxy_env: # proxy_env:
# HTTP_PROXY: "http://proxy.domain.local:3128" # HTTP_PROXY: "http://proxy.domain.local:3128"
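To make the interface-selection logic in this hunk concrete: `k3s_node_ip` falls back through `cilium_iface`, then `calico_iface`, then `flannel_iface`, and the same check decides whether `--flannel-iface` is passed at all. A minimal sketch of how the server arguments would come out on a host that defines only `calico_iface` (the node address is hypothetical; the rest are the sample defaults above):

```yml
# Hypothetical host_vars excerpt: only calico_iface is set, so k3s_node_ip is read
# from ansible_facts['eth0'] and the --flannel-iface flag is dropped entirely.
calico_iface: "eth0"

# extra_server_args would then render to roughly:
#   --node-ip=192.168.30.38                                     # hypothetical eth0 address
#   --node-taint node-role.kubernetes.io/master=true:NoSchedule
#   --flannel-backend=none
#   --disable-network-policy
#   --cluster-cidr=10.52.0.0/16
#   --tls-san 192.168.30.222
#   --disable servicelb
```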


@@ -13,12 +13,6 @@ We have these scenarios:
To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node. To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
- **single_node**: - **single_node**:
Very similar to the default scenario, but uses only a single node for all cluster functionality. Very similar to the default scenario, but uses only a single node for all cluster functionality.
- **calico**:
The same as single node, but uses calico cni instead of flannel.
- **cilium**:
The same as single node, but uses cilium cni instead of flannel.
- **kube-vip**
The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB
## How to execute ## How to execute


@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.62
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy


@@ -1,16 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
calico_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.224"
metal_lb_ip_range: "192.168.30.100-192.168.30.109"


@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.63
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy


@@ -1,16 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
cilium_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
metal_lb_ip_range: "192.168.30.110-192.168.30.119"


@@ -7,7 +7,7 @@ platforms:
- name: control1 - name: control1
box: generic/ubuntu2204 box: generic/ubuntu2204
memory: 1024 memory: 2048
cpus: 2 cpus: 2
groups: groups:
- k3s_cluster - k3s_cluster
@@ -22,8 +22,8 @@ platforms:
ssh.password: "vagrant" ssh.password: "vagrant"
- name: control2 - name: control2
box: generic/debian12 box: generic/debian11
memory: 1024 memory: 2048
cpus: 2 cpus: 2
groups: groups:
- k3s_cluster - k3s_cluster
@@ -34,7 +34,7 @@ platforms:
- name: control3 - name: control3
box: generic/rocky9 box: generic/rocky9
memory: 1024 memory: 2048
cpus: 2 cpus: 2
groups: groups:
- k3s_cluster - k3s_cluster
@@ -45,7 +45,7 @@ platforms:
- name: node1 - name: node1
box: generic/ubuntu2204 box: generic/ubuntu2204
memory: 1024 memory: 2048
cpus: 2 cpus: 2
groups: groups:
- k3s_cluster - k3s_cluster
@@ -61,7 +61,7 @@ platforms:
- name: node2 - name: node2
box: generic/rocky9 box: generic/rocky9
memory: 1024 memory: 2048
cpus: 2 cpus: 2
groups: groups:
- k3s_cluster - k3s_cluster
@@ -72,8 +72,6 @@ platforms:
provisioner: provisioner:
name: ansible name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks: playbooks:
converge: ../resources/converge.yml converge: ../resources/converge.yml
side_effect: ../resources/reset.yml side_effect: ../resources/reset.yml
@@ -84,6 +82,7 @@ provisioner:
scenario: scenario:
test_sequence: test_sequence:
- dependency - dependency
- lint
- cleanup - cleanup
- destroy - destroy
- syntax - syntax


@@ -6,7 +6,7 @@
ansible.builtin.set_fact: ansible.builtin.set_fact:
# See: # See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1 container_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster: # The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45 retry_count: 45


@@ -6,7 +6,7 @@ driver:
platforms: platforms:
- name: control1 - name: control1
box: generic/ubuntu2204 box: generic/ubuntu2204
memory: 1024 memory: 2048
cpus: 2 cpus: 2
groups: groups:
- k3s_cluster - k3s_cluster
@@ -22,7 +22,7 @@ platforms:
- name: control2 - name: control2
box: generic/ubuntu2204 box: generic/ubuntu2204
memory: 1024 memory: 2048
cpus: 2 cpus: 2
groups: groups:
- k3s_cluster - k3s_cluster
@@ -38,7 +38,7 @@ platforms:
- name: node1 - name: node1
box: generic/ubuntu2204 box: generic/ubuntu2204
memory: 1024 memory: 2048
cpus: 2 cpus: 2
groups: groups:
- k3s_cluster - k3s_cluster
@@ -53,8 +53,6 @@ platforms:
ssh.password: "vagrant" ssh.password: "vagrant"
provisioner: provisioner:
name: ansible name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks: playbooks:
converge: ../resources/converge.yml converge: ../resources/converge.yml
side_effect: ../resources/reset.yml side_effect: ../resources/reset.yml
@@ -65,6 +63,7 @@ provisioner:
scenario: scenario:
test_sequence: test_sequence:
- dependency - dependency
- lint
- cleanup - cleanup
- destroy - destroy
- syntax - syntax


@@ -6,7 +6,7 @@
ansible.builtin.set_fact: ansible.builtin.set_fact:
# See: # See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1 container_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be # In this scenario, we have multiple interfaces that the VIP could be
# broadcasted on. Since we have assigned a dedicated private network # broadcasted on. Since we have assigned a dedicated private network
@@ -27,13 +27,13 @@
- fdad:bad:ba55::1b:0/112 - fdad:bad:ba55::1b:0/112
- 192.168.123.80-192.168.123.90 - 192.168.123.80-192.168.123.90
# k3s_node_ip is by default set to the IPv4 address of flannel_iface. # k3s_node_ip is by default set to the IPv4 address of container_iface.
# We want IPv6 addresses here of course, so we just specify them # We want IPv6 addresses here of course, so we just specify them
# manually below. # manually below.
k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}" k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}"
- name: Override host variables (2/2) - name: Override host variables (2/2)
# Since "extra_args" depends on "k3s_node_ip" and "flannel_iface" we have # Since "extra_args" depends on "k3s_node_ip" and "container_iface" we have
# to set this AFTER overriding the both of them. # to set this AFTER overriding the both of them.
ansible.builtin.set_fact: ansible.builtin.set_fact:
# A few extra server args are necessary: # A few extra server args are necessary:


@@ -30,7 +30,7 @@
name: net.ipv6.conf.{{ item }}.accept_dad name: net.ipv6.conf.{{ item }}.accept_dad
value: "0" value: "0"
with_items: with_items:
- "{{ flannel_iface }}" - "{{ container_iface }}"
- name: Write IPv4 configuration - name: Write IPv4 configuration
ansible.builtin.template: ansible.builtin.template:


@@ -3,6 +3,6 @@ network:
version: 2 version: 2
renderer: networkd renderer: networkd
ethernets: ethernets:
{{ flannel_iface }}: {{ container_iface }}:
addresses: addresses:
- {{ node_ipv4 }}/24 - {{ node_ipv4 }}/24


@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.62
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy


@@ -1,17 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
# Use kube-vip instead of MetalLB
kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"


@@ -21,8 +21,6 @@ platforms:
ip: 192.168.30.50 ip: 192.168.30.50
provisioner: provisioner:
name: ansible name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks: playbooks:
converge: ../resources/converge.yml converge: ../resources/converge.yml
side_effect: ../resources/reset.yml side_effect: ../resources/reset.yml
@@ -33,6 +31,7 @@ provisioner:
scenario: scenario:
test_sequence: test_sequence:
- dependency - dependency
- lint
- cleanup - cleanup
- destroy - destroy
- syntax - syntax


@@ -6,7 +6,7 @@
ansible.builtin.set_fact: ansible.builtin.set_fact:
# See: # See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1 container_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster: # The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45 retry_count: 45


@@ -6,5 +6,4 @@
- name: Reboot the nodes (and Wait upto 5 mins max) - name: Reboot the nodes (and Wait upto 5 mins max)
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 300 reboot_timeout: 300
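The new `reboot_command` line above falls back to Ansible's default when `custom_reboot_command` is unset. A minimal override, matching the commented hint in the sample `all.yml` for dbus-less distros such as DietPi:

```yml
# inventory group_vars/host_vars override; only needed where the default reboot command fails
custom_reboot_command: /usr/sbin/shutdown -r now
```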


@@ -1,10 +1,10 @@
ansible-core>=2.16.2 ansible-core>=2.13.5
jmespath>=1.0.1 jmespath>=1.0.1
jsonpatch>=1.33 jsonpatch>=1.32
kubernetes>=29.0.0 kubernetes>=25.3.0
molecule-plugins[vagrant] molecule-vagrant>=1.0.0
molecule>=6.0.3 molecule>=4.0.3
netaddr>=0.10.1 netaddr>=0.8.0
pre-commit>=3.6.0 pre-commit>=2.20.0
pre-commit-hooks>=4.5.0 pre-commit-hooks>=1.3.1
pyyaml>=6.0.1 pyyaml>=6.0


@@ -4,165 +4,174 @@
# #
# pip-compile requirements.in # pip-compile requirements.in
# #
ansible-compat==4.1.11 ansible-compat==3.0.1
# via molecule # via molecule
ansible-core==2.16.4 ansible-core==2.15.4
# via # via
# -r requirements.in # -r requirements.in
# ansible-compat # ansible-compat
# molecule arrow==1.2.3
attrs==23.2.0 # via jinja2-time
# via attrs==22.1.0
# jsonschema # via jsonschema
# referencing binaryornot==0.4.4
bracex==2.4 # via cookiecutter
# via wcmatch cachetools==5.2.0
cachetools==5.3.2
# via google-auth # via google-auth
certifi==2023.11.17 certifi==2022.9.24
# via # via
# kubernetes # kubernetes
# requests # requests
cffi==1.16.0 cffi==1.15.1
# via cryptography # via cryptography
cfgv==3.4.0 cfgv==3.3.1
# via pre-commit # via pre-commit
charset-normalizer==3.3.2 chardet==5.0.0
# via binaryornot
charset-normalizer==2.1.1
# via requests # via requests
click==8.1.7 click==8.1.3
# via # via
# click-help-colors # click-help-colors
# cookiecutter
# molecule # molecule
click-help-colors==0.9.4 click-help-colors==0.9.1
# via molecule # via molecule
cryptography==41.0.7 commonmark==0.9.1
# via rich
cookiecutter==2.1.1
# via molecule
cryptography==38.0.3
# via ansible-core # via ansible-core
distlib==0.3.8 distlib==0.3.6
# via virtualenv # via virtualenv
distro==1.8.0
# via selinux
enrich==1.2.7 enrich==1.2.7
# via molecule # via molecule
filelock==3.13.1 filelock==3.8.0
# via virtualenv # via virtualenv
google-auth==2.26.2 google-auth==2.14.0
# via kubernetes # via kubernetes
identify==2.5.33 identify==2.5.8
# via pre-commit # via pre-commit
idna==3.6 idna==3.4
# via requests # via requests
jinja2==3.1.3 jinja2==3.1.2
# via # via
# ansible-core # ansible-core
# cookiecutter
# jinja2-time
# molecule # molecule
# molecule-vagrant
jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1 jmespath==1.0.1
# via -r requirements.in # via -r requirements.in
jsonpatch==1.33 jsonpatch==1.33
# via -r requirements.in # via -r requirements.in
jsonpointer==2.4 jsonpointer==2.3
# via jsonpatch # via jsonpatch
jsonschema==4.21.1 jsonschema==4.17.0
# via # via
# ansible-compat # ansible-compat
# molecule # molecule
jsonschema-specifications==2023.12.1 kubernetes==25.3.0
# via jsonschema
kubernetes==29.0.0
# via -r requirements.in # via -r requirements.in
markdown-it-py==3.0.0 markupsafe==2.1.1
# via rich
markupsafe==2.1.4
# via jinja2 # via jinja2
mdurl==0.1.2 molecule==4.0.4
# via markdown-it-py
molecule==6.0.3
# via # via
# -r requirements.in # -r requirements.in
# molecule-plugins # molecule-vagrant
molecule-plugins[vagrant]==23.5.3 molecule-vagrant==1.0.0
# via -r requirements.in # via -r requirements.in
netaddr==0.10.1 netaddr==0.10.0
# via -r requirements.in # via -r requirements.in
nodeenv==1.8.0 nodeenv==1.7.0
# via pre-commit # via pre-commit
oauthlib==3.2.2 oauthlib==3.2.2
# via # via requests-oauthlib
# kubernetes packaging==21.3
# requests-oauthlib
packaging==23.2
# via # via
# ansible-compat # ansible-compat
# ansible-core # ansible-core
# molecule # molecule
platformdirs==4.1.0 platformdirs==2.5.2
# via virtualenv # via virtualenv
pluggy==1.3.0 pluggy==1.0.0
# via molecule # via molecule
pre-commit==3.6.2 pre-commit==2.21.0
# via -r requirements.in # via -r requirements.in
pre-commit-hooks==4.5.0 pre-commit-hooks==4.5.0
# via -r requirements.in # via -r requirements.in
pyasn1==0.5.1 pyasn1==0.4.8
# via # via
# pyasn1-modules # pyasn1-modules
# rsa # rsa
pyasn1-modules==0.3.0 pyasn1-modules==0.2.8
# via google-auth # via google-auth
pycparser==2.21 pycparser==2.21
# via cffi # via cffi
pygments==2.17.2 pygments==2.13.0
# via rich # via rich
pyparsing==3.0.9
# via packaging
pyrsistent==0.19.2
# via jsonschema
python-dateutil==2.8.2 python-dateutil==2.8.2
# via kubernetes # via
# arrow
# kubernetes
python-slugify==6.1.2
# via cookiecutter
python-vagrant==1.0.0 python-vagrant==1.0.0
# via molecule-plugins # via molecule-vagrant
pyyaml==6.0.1 pyyaml==6.0.1
# via # via
# -r requirements.in # -r requirements.in
# ansible-compat # ansible-compat
# ansible-core # ansible-core
# cookiecutter
# kubernetes # kubernetes
# molecule # molecule
# molecule-vagrant
# pre-commit # pre-commit
referencing==0.32.1 requests==2.28.1
# via
# jsonschema
# jsonschema-specifications
requests==2.31.0
# via # via
# cookiecutter
# kubernetes # kubernetes
# requests-oauthlib # requests-oauthlib
requests-oauthlib==1.3.1 requests-oauthlib==1.3.1
# via kubernetes # via kubernetes
resolvelib==1.0.1 resolvelib==0.8.1
# via ansible-core # via ansible-core
rich==13.7.0 rich==12.6.0
# via # via
# enrich # enrich
# molecule # molecule
rpds-py==0.17.1
# via
# jsonschema
# referencing
rsa==4.9 rsa==4.9
# via google-auth # via google-auth
ruamel-yaml==0.18.5 ruamel-yaml==0.17.21
# via pre-commit-hooks # via pre-commit-hooks
ruamel-yaml-clib==0.2.8 selinux==0.2.1
# via ruamel-yaml # via molecule-vagrant
six==1.16.0 six==1.16.0
# via # via
# google-auth
# kubernetes # kubernetes
# python-dateutil # python-dateutil
subprocess-tee==0.4.1 subprocess-tee==0.4.1
# via ansible-compat # via ansible-compat
urllib3==2.1.0 text-unidecode==1.3
# via python-slugify
urllib3==1.26.12
# via # via
# kubernetes # kubernetes
# requests # requests
virtualenv==20.25.0 virtualenv==20.16.6
# via pre-commit # via pre-commit
wcmatch==8.5 websocket-client==1.4.2
# via molecule
websocket-client==1.7.0
# via kubernetes # via kubernetes
# The following packages are considered to be unsafe in a requirements file: # The following packages are considered to be unsafe in a requirements file:


@@ -12,7 +12,6 @@
- name: Reboot and wait for node to come back up - name: Reboot and wait for node to come back up
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 3600 reboot_timeout: 3600
- name: Revert changes to Proxmox cluster - name: Revert changes to Proxmox cluster


@@ -1,18 +1,18 @@
--- ---
- name: Create k3s-node.service.d directory
- name: Create k3s.service.d directory
file: file:
path: '{{ systemd_dir }}/k3s-node.service.d' path: '{{ systemd_dir }}/k3s.service.d'
state: directory state: directory
owner: root owner: root
group: root group: root
mode: '0755' mode: '0755'
when: proxy_env is defined
- name: Copy K3s http_proxy conf file - name: Copy K3s http_proxy conf file
template: template:
src: "http_proxy.conf.j2" src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf" dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root owner: root
group: root group: root
mode: '0755' mode: '0755'
when: proxy_env is defined
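Both proxy tasks above are gated on `proxy_env` being defined. A minimal sketch of that variable, following the commented example in the sample `all.yml` (the HTTPS_PROXY and NO_PROXY values are assumptions, not from this diff):

```yml
proxy_env:
  HTTP_PROXY: "http://proxy.domain.local:3128"
  HTTPS_PROXY: "http://proxy.domain.local:3128"      # assumed to mirror HTTP_PROXY
  NO_PROXY: "127.0.0.1,localhost,192.168.30.0/24"    # hypothetical exclusions
```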


@@ -1,35 +1,19 @@
--- ---
- name: Check for PXE-booted system
block:
- name: Check if system is PXE-booted
ansible.builtin.command:
cmd: cat /proc/cmdline
register: boot_cmdline
changed_when: false
check_mode: false
- name: Set fact for PXE-booted system
ansible.builtin.set_fact:
is_pxe_booted: "{{ 'root=/dev/nfs' in boot_cmdline.stdout }}"
when: boot_cmdline.stdout is defined
- name: Include http_proxy configuration tasks
ansible.builtin.include_tasks: http_proxy.yml
- name: Deploy K3s http_proxy conf - name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml include_tasks: http_proxy.yml
when: proxy_env is defined when: proxy_env is defined
- name: Configure the k3s service - name: Copy K3s service file
ansible.builtin.template: template:
src: "k3s.service.j2" src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s-node.service" dest: "{{ systemd_dir }}/k3s-node.service"
owner: root owner: root
group: root group: root
mode: '0755' mode: 0755
- name: Manage k3s service - name: Enable and check K3s service
ansible.builtin.systemd: systemd:
name: k3s-node name: k3s-node
daemon_reload: true daemon_reload: true
state: restarted state: restarted
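The PXE check added at the top of this hunk simply inspects the kernel command line; the agent unit template in the next file uses the resulting fact to switch to the native snapshotter. A sketch of what the facts would look like on an NFS-root node (the cmdline value is illustrative):

```yml
boot_cmdline:
  stdout: "BOOT_IMAGE=vmlinuz initrd=initrd.img root=/dev/nfs ip=dhcp"   # hypothetical
is_pxe_booted: true   # 'root=/dev/nfs' found in stdout
```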


@@ -7,14 +7,11 @@ After=network-online.target
Type=notify Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay ExecStartPre=-/sbin/modprobe overlay
# Conditional snapshotter based on PXE boot status ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
ExecStart=/usr/local/bin/k3s agent \
--server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 \
{% if is_pxe_booted | default(false) %}--snapshotter native \
{% endif %}--token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} \
{{ extra_agent_args | default("") }}
KillMode=process KillMode=process
Delegate=yes Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576 LimitNOFILE=1048576
LimitNPROC=infinity LimitNPROC=infinity
LimitCORE=infinity LimitCORE=infinity


@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip cloud provider manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy kubevip configMap manifest to first master
template:
src: "kubevip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']


@@ -6,13 +6,6 @@
state: stopped state: stopped
failed_when: false failed_when: false
# k3s-init won't work if the port is already in use
- name: Stop k3s
systemd:
name: k3s
state: stopped
failed_when: false
- name: Clean previous runs of k3s-init # noqa command-instead-of-module - name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command. # The systemd module does not support "reset-failed", so we need to resort to command.
command: systemctl reset-failed k3s-init command: systemctl reset-failed k3s-init
@@ -29,12 +22,6 @@
- name: Deploy metallb manifest - name: Deploy metallb manifest
include_tasks: metallb.yml include_tasks: metallb.yml
tags: metallb tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Deploy kube-vip manifest
include_tasks: kube-vip.yml
tags: kubevip
when: kube_vip_lb_ip_range is defined
- name: Init cluster inside the transient k3s-init service - name: Init cluster inside the transient k3s-init service
command: command:
@@ -42,7 +29,7 @@
-p Restart=on-failure \ -p Restart=on-failure \
--unit=k3s-init \ --unit=k3s-init \
k3s server {{ server_init_args }}" k3s server {{ server_init_args }}"
creates: "{{ systemd_dir }}/k3s-init.service" creates: "{{ systemd_dir }}/k3s.service"
- name: Verification - name: Verification
when: not ansible_check_mode when: not ansible_check_mode


@@ -10,7 +10,7 @@
- name: Download vip rbac manifest to first master - name: Download vip rbac manifest to first master
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://kube-vip.io/manifests/rbac.yaml" url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml" dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root owner: root
group: root group: root


@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubevip
namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
range-global: {{ kube_vip_lb_ip_range | join(',') }}
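For illustration, with the legacy single-string form `kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"` (the commented range from the sample `all.yml`), this template renders to:

```yml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
  range-global: 192.168.30.80-192.168.30.90
```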


@@ -43,7 +43,7 @@ spec:
- name: vip_ddns - name: vip_ddns
value: "false" value: "false"
- name: svc_enable - name: svc_enable
value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}" value: "false"
- name: vip_leaderelection - name: vip_leaderelection
value: "true" value: "true"
- name: vip_leaseduration - name: vip_leaseduration


@@ -1,6 +1,6 @@
--- ---
# Timeout to wait for MetalLB services to come up # Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 240s metal_lb_available_timeout: 120s
# Name of the master group # Name of the master group
group_name_master: master group_name_master: master


@@ -1,8 +1,5 @@
--- ---
- name: Deploy Calico to cluster - block:
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create manifests directory on first master - name: Create manifests directory on first master
file: file:
path: /tmp/k3s path: /tmp/k3s
@@ -23,9 +20,6 @@
ansible.builtin.template: ansible.builtin.template:
src: "calico.crs.j2" src: "calico.crs.j2"
dest: /tmp/k3s/custom-resources.yaml dest: /tmp/k3s/custom-resources.yaml
owner: root
group: root
mode: 0755
- name: Deploy or replace Tigera Operator - name: Deploy or replace Tigera Operator
block: block:
@@ -48,17 +42,17 @@
k3s kubectl wait {{ item.type }}/{{ item.name }} k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator' --namespace='tigera-operator'
--for=condition=Available=True --for=condition=Available=True
--timeout=30s --timeout=7s
register: tigera_result register: tigera_result
changed_when: false changed_when: false
until: tigera_result is succeeded until: tigera_result is succeeded
retries: 7 retries: 7
delay: 7 delay: 7
with_items: with_items:
- {name: tigera-operator, type: deployment} - { name: tigera-operator, type: deployment }
loop_control: loop_control:
label: "{{ item.type }}/{{ item.name }}" label: "{{ item.type }}/{{ item.name }}"
- name: Deploy Calico custom resources - name: Deploy Calico custom resources
block: block:
- name: Deploy custom resources for Calico - name: Deploy custom resources for Calico
@@ -74,7 +68,7 @@
register: apply_cr register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout" changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'Error' in apply_cr.stderr" failed_when: "'Error' in apply_cr.stderr"
- name: Wait for Calico system resources to be available - name: Wait for Calico system resources to be available
command: >- command: >-
{% if item.type == 'daemonset' %} {% if item.type == 'daemonset' %}
@@ -87,28 +81,19 @@
--namespace='{{ item.namespace }}' --namespace='{{ item.namespace }}'
--for=condition=Available --for=condition=Available
{% endif %} {% endif %}
--timeout=30s --timeout=7s
register: cr_result register: cr_result
changed_when: false changed_when: false
until: cr_result is succeeded until: cr_result is succeeded
retries: 30 retries: 30
delay: 7 delay: 7
with_items: with_items:
- {name: calico-typha, type: deployment, namespace: calico-system} - { name: calico-typha, type: deployment, namespace: calico-system }
- {name: calico-kube-controllers, type: deployment, namespace: calico-system} - { name: calico-kube-controllers, type: deployment, namespace: calico-system }
- {name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system} - { name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system }
- {name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system} - { name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system }
- {name: calico-apiserver, type: deployment, namespace: calico-apiserver} - { name: calico-apiserver, type: deployment, selector: 'k8s-app=calico-apiserver', namespace: calico-apiserver }
loop_control: loop_control:
label: "{{ item.type }}/{{ item.name }}" label: "{{ item.type }}/{{ item.name }}"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true # stops "skipped" log spam
- name: Patch Felix configuration for eBPF mode
ansible.builtin.command:
cmd: >
kubectl patch felixconfiguration default
--type='merge'
--patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
register: patch_result
changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
failed_when: "'Error' in patch_result.stderr"
when: calico_ebpf
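The Felix patch at the end of this hunk flips bpfKubeProxyIptablesCleanupEnabled to false when calico_ebpf is enabled, and it can only succeed once the Calico API server is serving projectcalico.org resources. A hedged verification sketch, written in the same retry style the role already uses:

- name: Confirm the Felix dataplane patch landed (illustrative check)
  hosts: master
  run_once: true
  tasks:
    - name: Read back bpfKubeProxyIptablesCleanupEnabled from the default FelixConfiguration
      ansible.builtin.command: >-
        k3s kubectl get felixconfiguration default
        -o jsonpath='{.spec.bpfKubeProxyIptablesCleanupEnabled}'
      register: felix_cfg
      changed_when: false
      until: felix_cfg is succeeded
      retries: 5
      delay: 5
    - name: Report the current setting
      ansible.builtin.debug:
        msg: "bpfKubeProxyIptablesCleanupEnabled={{ felix_cfg.stdout | default('not set') }}"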

View File

@@ -1,253 +0,0 @@
---
- name: Prepare Cilium CLI on first master and deploy CNI
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create tmp directory on first master
file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
- name: Check if Cilium CLI is installed
ansible.builtin.command: cilium version
register: cilium_cli_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check for Cilium CLI version in command output
set_fact:
installed_cli_version: >-
{{
cilium_cli_installed.stdout_lines
| join(' ')
| regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
when: cilium_cli_installed.rc == 0
- name: Get latest stable Cilium CLI version file
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt"
dest: "/tmp/k3s/cilium-cli-stable.txt"
owner: root
group: root
mode: 0755
- name: Read Cilium CLI stable version from file
ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
register: cli_ver
changed_when: false
- name: Log installed Cilium CLI version
ansible.builtin.debug:
msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"
- name: Log latest stable Cilium CLI version
ansible.builtin.debug:
msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"
- name: Determine if Cilium CLI needs installation or update
set_fact:
cilium_cli_needs_update: >-
{{
cilium_cli_installed.rc != 0 or
(cilium_cli_installed.rc == 0 and
installed_cli_version != cli_ver.stdout)
}}
- name: Install or update Cilium CLI
when: cilium_cli_needs_update
block:
- name: Set architecture variable
ansible.builtin.set_fact:
cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
- name: Download Cilium CLI and checksum
ansible.builtin.get_url:
url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
dest: "/tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}"
owner: root
group: root
mode: 0755
loop:
- ".tar.gz"
- ".tar.gz.sha256sum"
vars:
cilium_base_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}"
- name: Verify the downloaded tarball
ansible.builtin.shell: |
cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
args:
executable: /bin/bash
changed_when: false
- name: Extract Cilium CLI to /usr/local/bin
ansible.builtin.unarchive:
src: "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
dest: /usr/local/bin
remote_src: true
- name: Remove downloaded tarball and checksum file
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum"
- name: Wait for connectivity to kube VIP
ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
register: ping_result
until: ping_result.rc == 0
retries: 21
delay: 1
ignore_errors: true
changed_when: false
- name: Fail if kube VIP not reachable
ansible.builtin.fail:
msg: "API endpoint {{ apiserver_endpoint }} is not reachable"
when: ping_result.rc != 0
- name: Test for existing Cilium install
ansible.builtin.command: k3s kubectl -n kube-system get daemonsets cilium
register: cilium_installed
failed_when: false
changed_when: false
ignore_errors: true
- name: Check existing Cilium install
when: cilium_installed.rc == 0
block:
- name: Check Cilium version
ansible.builtin.command: cilium version
register: cilium_version
failed_when: false
changed_when: false
ignore_errors: true
- name: Parse installed Cilium version
set_fact:
installed_cilium_version: >-
{{
cilium_version.stdout_lines
| join(' ')
| regex_findall('cilium image.+(\d+\.\d+\.\d+)')
| first
| default('unknown')
}}
- name: Determine if Cilium needs update
set_fact:
cilium_needs_update: >-
{{ 'v' + installed_cilium_version != cilium_tag }}
- name: Log result
ansible.builtin.debug:
msg: >
Installed Cilium version: {{ installed_cilium_version }},
Target Cilium version: {{ cilium_tag }},
Update needed: {{ cilium_needs_update }}
- name: Install Cilium
ansible.builtin.command: >-
{% if cilium_installed.rc != 0 %}
cilium install
{% else %}
cilium upgrade
{% endif %}
--version "{{ cilium_tag }}"
--helm-set operator.replicas="1"
{{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
--helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
{% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
--helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
{% endif %}
--helm-set k8sServiceHost="127.0.0.1"
--helm-set k8sServicePort="6444"
--helm-set routingMode={{ cilium_mode | default("native") }}
--helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
--helm-set kubeProxyReplacement={{ kube_proxy_replacement | default("true") }}
--helm-set bpf.masquerade={{ enable_bpf_masquerade | default("true") }}
--helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
--helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
--helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
{% if kube_proxy_replacement is not false %}
--helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm | default("maglev") }}
--helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }}
{% endif %}
environment:
KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
register: cilium_install_result
changed_when: cilium_install_result.rc == 0
when: cilium_installed.rc != 0 or cilium_needs_update
- name: Wait for Cilium resources
command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace=kube-system
--selector='k8s-app=cilium'
--for=condition=Ready
{% else %}
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace=kube-system
--for=condition=Available
{% endif %}
--timeout=30s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- {name: cilium-operator, type: deployment}
- {name: cilium, type: daemonset, selector: 'k8s-app=cilium'}
- {name: hubble-relay, type: deployment, check_hubble: true}
- {name: hubble-ui, type: deployment, check_hubble: true}
loop_control:
label: "{{ item.type }}/{{ item.name }}"
when: >-
not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)
- name: Configure Cilium BGP
when: cilium_bgp
block:
- name: Copy BGP manifests to first master
ansible.builtin.template:
src: "cilium.crs.j2"
dest: /tmp/k3s/cilium-bgp.yaml
owner: root
group: root
mode: 0755
- name: Apply BGP manifests
ansible.builtin.command:
cmd: kubectl apply -f /tmp/k3s/cilium-bgp.yaml
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'is invalid' in apply_cr.stderr"
ignore_errors: true
- name: Print error message if BGP manifests application fails
ansible.builtin.debug:
msg: "{{ apply_cr.stderr }}"
when: "'is invalid' in apply_cr.stderr"
- name: Test for BGP config resources
ansible.builtin.command: "{{ item }}"
loop:
- k3s kubectl get CiliumBGPPeeringPolicy.cilium.io
- k3s kubectl get CiliumLoadBalancerIPPool.cilium.io
changed_when: false
loop_control:
label: "{{ item }}"

View File

@@ -2,17 +2,11 @@
- name: Deploy calico - name: Deploy calico
include_tasks: calico.yml include_tasks: calico.yml
tags: calico tags: calico
when: calico_iface is defined and cilium_iface is not defined when: use_calico == true
- name: Deploy cilium
include_tasks: cilium.yml
tags: cilium
when: cilium_iface is defined
- name: Deploy metallb pool - name: Deploy metallb pool
include_tasks: metallb.yml include_tasks: metallb.yml
tags: metallb tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Remove tmp directory used for manifests - name: Remove tmp directory used for manifests
file: file:
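Whichever side of this hunk is in use, only one CNI path should be active per run. For the variant keyed on interface variables, a group_vars sketch like the one below selects Calico and leaves the MetalLB include enabled; the interface name and CIDR are placeholders, not recommendations:

# group_vars/all.yml, illustrative excerpt
calico_iface: "eth0"            # defining this (and not cilium_iface) selects the Calico path
calico_ebpf: false              # keep the iptables dataplane
cluster_cidr: "10.52.0.0/16"    # pod CIDR consumed by the Installation custom resource
# cilium_iface: "eth0"          # defining this instead would run cilium.yml rather than calico.yml
# kube_vip_lb_ip_range: "..."   # if set, kube-vip serves LoadBalancer IPs and the MetalLB include is skipped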

View File

@@ -8,27 +8,6 @@
with_items: "{{ groups[group_name_master | default('master')] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true
- name: Delete outdated metallb replicas
shell: |-
set -o pipefail
REPLICAS=$(k3s kubectl --namespace='metallb-system' get replicasets \
-l 'component=controller,app=metallb' \
-o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
if [ -n "${REPLICAS_SETS}" ] ; then
for REPLICAS in "${REPLICAS_SETS}"
do
k3s kubectl --namespace='metallb-system' \
delete rs "${REPLICAS}"
done
fi
args:
executable: /bin/bash
changed_when: false
run_once: true
with_items: "{{ groups[group_name_master | default('master')] }}"
- name: Copy metallb CRs manifest to first master - name: Copy metallb CRs manifest to first master
template: template:
src: "metallb.crs.j2" src: "metallb.crs.j2"

View File

@@ -9,14 +9,13 @@ spec:
calicoNetwork: calicoNetwork:
# Note: The ipPools section cannot be modified post-install. # Note: The ipPools section cannot be modified post-install.
ipPools: ipPools:
- blockSize: {{ calico_blockSize | default('26') }} - blockSize: {{ calico_blockSize if calico_blockSize is defined else '26' }}
cidr: {{ cluster_cidr | default('10.52.0.0/16') }} cidr: {{ calico_cidr if calico_cidr is defined else '10.52.0.0/16' }}
encapsulation: {{ calico_encapsulation | default('VXLANCrossSubnet') }} encapsulation: {{ calico_encapsulation if calico_encapsulation is defined else 'VXLANCrossSubnet' }}
natOutgoing: {{ calico_natOutgoing | default('Enabled') }} natOutgoing: {{ calico_natOutgoing if calico_natOutgoing is defined else 'Enabled' }}
nodeSelector: {{ calico_nodeSelector | default('all()') }} nodeSelector: {{ calico_nodeSelector if calico_nodeSelector is defined else 'all()' }}
nodeAddressAutodetectionV4: nodeAddressAutodetectionV4:
interface: {{ calico_iface }} interface: {{ container_iface if container_iface is defined else 'eth0' }}
linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}
--- ---
@@ -27,15 +26,3 @@ kind: APIServer
metadata: metadata:
name: default name: default
spec: {} spec: {}
{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kubernetes-services-endpoint
namespace: tigera-operator
data:
KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
KUBERNETES_SERVICE_PORT: '6443'
{% endif %}
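For orientation, the left-hand defaults in this template render to roughly the following Installation resource when calico_iface is eth0 and calico_ebpf is off; the apiVersion and metadata lines are reconstructed from the upstream Tigera operator example rather than copied from this repository:

apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: 10.52.0.0/16
        encapsulation: VXLANCrossSubnet
        natOutgoing: Enabled
        nodeSelector: all()
    nodeAddressAutodetectionV4:
      interface: eth0
    linuxDataplane: Iptables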

View File

@@ -1,29 +0,0 @@
apiVersion: "cilium.io/v2alpha1"
kind: CiliumBGPPeeringPolicy
metadata:
name: 01-bgp-peering-policy
spec: # CiliumBGPPeeringPolicySpec
virtualRouters: # []CiliumBGPVirtualRouter
- localASN: {{ cilium_bgp_my_asn }}
exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
neighbors: # []CiliumBGPNeighbor
- peerAddress: '{{ cilium_bgp_peer_address + "/32"}}'
peerASN: {{ cilium_bgp_peer_asn }}
eBGPMultihopTTL: 10
connectRetryTimeSeconds: 120
holdTimeSeconds: 90
keepAliveTimeSeconds: 30
gracefulRestart:
enabled: true
restartTimeSeconds: 120
serviceSelector:
matchExpressions:
- {key: somekey, operator: NotIn, values: ['never-used-value']}
---
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
name: "01-lb-pool"
spec:
cidrs:
- cidr: "{{ cilium_bgp_lb_cidr }}"

View File

@@ -2,5 +2,4 @@
- name: Reboot server - name: Reboot server
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot server listen: reboot server

View File

@@ -1,5 +1,4 @@
--- ---
- name: Reboot - name: Reboot
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot listen: reboot
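Both handler variants take an optional custom_reboot_command and fall back to the reboot module's default when it is undefined, thanks to omit. A hypothetical override for a host that needs a forced reboot (the value is an assumption, not a project default):

# host_vars/some-node.yml, hypothetical override
custom_reboot_command: "systemctl reboot --force"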

View File

@@ -17,27 +17,21 @@
when: when:
grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0 grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm) - name: Set detected_distribution to Raspbian
set_fact: set_fact:
detected_distribution: Raspbian detected_distribution: Raspbian
vars:
  allowed_descriptions:
    - "[Rr]aspbian.*"
    - "Debian.*buster"
    - "Debian.*bullseye"
    - "Debian.*bookworm"
when:
  - ansible_facts.architecture is search("aarch64")
  - raspberry_pi|default(false)
  - ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))
when: >
  raspberry_pi|default(false) and
  ( ansible_facts.lsb.id|default("") == "Raspbian" or
  ansible_facts.lsb.description|default("") is match("[Rr]aspbian.*") )
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm) - name: Set detected_distribution to Raspbian (ARM64 on Debian Buster)
set_fact: set_fact:
detected_distribution: Raspbian detected_distribution: Raspbian
when: when:
- ansible_facts.architecture is search("aarch64") - ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false) - raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bookworm") - ansible_facts.lsb.description|default("") is match("Debian.*buster")
- name: Set detected_distribution_major_version - name: Set detected_distribution_major_version
set_fact: set_fact:
@@ -45,6 +39,14 @@
when: when:
- detected_distribution | default("") == "Raspbian" - detected_distribution | default("") == "Raspbian"
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bullseye)
set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }} - name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}" include_tasks: "{{ item }}"
with_first_found: with_first_found:
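The allowed_descriptions variant joins its patterns with '|' before matching, so any one of the listed release strings is accepted. A localhost sketch of that exact test, with a made-up LSB description:

- name: Demonstrate the joined-pattern Raspbian/Debian match
  hosts: localhost
  gather_facts: false
  vars:
    allowed_descriptions:
      - "[Rr]aspbian.*"
      - "Debian.*buster"
      - "Debian.*bullseye"
      - "Debian.*bookworm"
    sample_description: "Debian GNU/Linux 12 (bookworm)"
  tasks:
    - name: Evaluate the same match expression
      ansible.builtin.debug:
        msg: "{{ sample_description is match(allowed_descriptions | join('|')) }}"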

View File

@@ -1,27 +1,7 @@
--- ---
- name: Test for cmdline path
stat:
path: /boot/firmware/cmdline.txt
register: boot_cmdline_path
failed_when: false
changed_when: false
- name: Set cmdline path based on Debian version and command result
set_fact:
cmdline_path: >-
{{
(
boot_cmdline_path.stat.exists and
ansible_facts.lsb.description | default('') is match('Debian.*(?!(bookworm|sid))')
) | ternary(
'/boot/firmware/cmdline.txt',
'/boot/cmdline.txt'
)
}}
- name: Activating cgroup support - name: Activating cgroup support
lineinfile: lineinfile:
path: "{{ cmdline_path }}" path: /boot/cmdline.txt
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$' regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory' line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
backrefs: true backrefs: true
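After the lineinfile edit, the kernel command line stays on a single line with the cgroup flags appended once. A before/after sketch using a representative, made-up Raspberry Pi OS cmdline:

# before (example contents of /boot/cmdline.txt or /boot/firmware/cmdline.txt)
console=serial0,115200 console=tty1 root=PARTUUID=00000000-02 rootfstype=ext4 fsck.repair=yes rootwait
# after the regexp/backrefs edit
console=serial0,115200 console=tty1 root=PARTUUID=00000000-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory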

View File

@@ -45,16 +45,13 @@
- /var/lib/rancher/k3s - /var/lib/rancher/k3s
- /var/lib/rancher/ - /var/lib/rancher/
- /var/lib/cni/ - /var/lib/cni/
- /etc/cni/net.d
- name: Remove K3s http_proxy files - name: Remove K3s http_proxy files
file: file:
name: "{{ item }}" name: "{{ item }}"
state: absent state: absent
with_items: with_items:
- "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s.service.d" - "{{ systemd_dir }}/k3s.service.d"
- "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s-node.service.d" - "{{ systemd_dir }}/k3s-node.service.d"
when: proxy_env is defined when: proxy_env is defined

View File

@@ -1,13 +1,4 @@
--- ---
- name: Pre tasks
hosts: all
pre_tasks:
- name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible)
assert:
that: "ansible_version.full is version_compare('2.11', '>=')"
msg: >
"Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/"
- name: Prepare Proxmox cluster - name: Prepare Proxmox cluster
hosts: proxmox hosts: proxmox
gather_facts: true gather_facts: true
@@ -55,14 +46,3 @@
roles: roles:
- role: k3s_server_post - role: k3s_server_post
become: true become: true
- name: Storing kubeconfig in the playbook directory
hosts: master
environment: "{{ proxy_env | default({}) }}"
tasks:
- name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
ansible.builtin.fetch:
src: "{{ ansible_user_dir }}/.kube/config"
dest: ./kubeconfig
flat: true
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
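Once the kubeconfig has been fetched into the playbook directory, it can be exercised straight from the control machine. A small smoke test, assuming kubectl is installed locally:

- name: Smoke-test the fetched kubeconfig from the control machine
  hosts: localhost
  gather_facts: false
  tasks:
    - name: List nodes using the copied kubeconfig
      ansible.builtin.command: kubectl get nodes -o wide
      environment:
        KUBECONFIG: "{{ playbook_dir }}/kubeconfig"
      register: nodes_out
      changed_when: false
    - name: Show the node list
      ansible.builtin.debug:
        msg: "{{ nodes_out.stdout_lines }}"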