Compare commits

80 Commits

Author SHA1 Message Date
Timothy Stewart 903f302236 fix(ci): move to ubuntu 2024-01-22 22:18:58 -06:00
Timothy Stewart c54a1c62ef fix(ci): move to ubuntu 2024-01-22 21:53:45 -06:00
Timothy Stewart e5a1da1913 fix(ci): testing with macos-12 2024-01-22 21:12:10 -06:00
Timothy Stewart f239f8eb39 fix(ci): fix molecule 2024-01-22 20:50:05 -06:00
Timothy Stewart ddcbeadde1 fix(ci): fix molecule 2024-01-22 20:18:55 -06:00
Timothy Stewart d7a9a8db76 fix(ci): fix molecule 2024-01-22 19:53:51 -06:00
Timothy Stewart e0ecd44c8c fix(ci): fix molecule 2024-01-22 19:40:43 -06:00
Timothy Stewart b928c930d5 fix(ci): fix molecule 2024-01-22 19:28:53 -06:00
Timothy Stewart 83c586654f fix(ci): fix molecule 2024-01-22 19:11:52 -06:00
Timothy Stewart b00f020e82 fix(ci): fix molecule 2024-01-22 19:08:32 -06:00
Timothy Stewart c8efd7de12 fix(ci): fix molecule 2024-01-22 19:04:03 -06:00
Timothy Stewart 62d37e1242 fix(ci): fix molecule 2024-01-22 19:00:20 -06:00
Timothy Stewart 31b36869c7 fix(ci): fix molecule 2024-01-22 18:40:05 -06:00
Timothy Stewart 40d7a0ba5a fix(ci): fix molecule 2024-01-22 18:38:59 -06:00
Timothy Stewart 7a2062af39 fix(ci): fix molecule 2024-01-22 18:18:56 -06:00
Timothy Stewart 01d4bb8f8a fix(ci): fix molecule 2024-01-22 18:13:00 -06:00
Timothy Stewart bdc5d0970d fix(ci): fix homebrew 2024-01-22 17:05:21 -06:00
Timothy Stewart 8dc6b7632b fix(ci): fix homebrew 2024-01-22 16:40:07 -06:00
Timothy Stewart 3409f0d431 fix(ci): fix homebrew 2024-01-22 16:34:49 -06:00
Timothy Stewart d786f615b8 fix(ci): fix homebrew 2024-01-22 16:34:20 -06:00
Timothy Stewart 647a3a8461 fix(ci): fix homebrew 2024-01-22 16:32:53 -06:00
Timothy Stewart a83078d98d fix(ci): fix homebrew 2024-01-22 16:11:14 -06:00
Timothy Stewart 4d1d11bcb4 fix(ci): fix homebrew 2024-01-22 16:10:21 -06:00
Timothy Stewart c9f50a792f fix(ci): fix homebrew 2024-01-22 16:07:58 -06:00
Timothy Stewart 74ca472a0f fix(molecule): remove lint 2024-01-22 16:00:20 -06:00
Timothy Stewart c59d4c5ae4 fix(pip): updating dependencies 2024-01-22 15:50:28 -06:00
Timothy Stewart 27c477be70 fix(molecule): reduce resources 2024-01-22 11:32:48 -06:00
Timothy Stewart 4707002267 fix(ci): adjusting cache steps 2024-01-21 22:35:47 -06:00
Timothy Stewart ffcc79300a fix(ci): adjusting cache steps 2024-01-21 22:25:22 -06:00
Timothy Stewart 0ae666dfe5 fix(post): Fix liquid formatting 2024-01-21 22:04:09 -06:00
Timothy Stewart b9d94f3675 fix(ci): adjusting cache steps 2024-01-21 21:13:24 -06:00
Timothy Stewart 8acec7055a fix(ci): adjusting cache steps 2024-01-21 19:59:25 -06:00
Timothy Stewart fc8ab77be4 fix(ci): adjusting cache steps 2024-01-21 18:42:29 -06:00
Timothy Stewart f7869f447d fix(ci): adjusting cache steps 2024-01-21 18:32:57 -06:00
Timothy Stewart eb89255d59 fix(ci): adjusting cache steps 2024-01-21 18:28:51 -06:00
Timothy Stewart 4b8c97c715 fix(ci): adjusting cache steps 2024-01-21 18:01:02 -06:00
Timothy Stewart 5a36416ccb fix(ci): adjusting cache steps 2024-01-21 17:54:20 -06:00
Timothy Stewart 59e76924b8 fix(ci): adjusting cache steps 2024-01-21 17:33:57 -06:00
Timothy Stewart 4f635eb0ef fix(ci): adjusting cache steps 2024-01-21 17:16:46 -06:00
Timothy Stewart f6597e859d fix(ci): adjusting cache steps 2024-01-21 16:51:29 -06:00
Timothy Stewart 82d36572f1 fix(ci): adjusting cache steps 2024-01-21 16:47:41 -06:00
Timothy Stewart 7f7e0e7921 fix(ci): adjusting cache steps 2024-01-21 16:32:26 -06:00
Timothy Stewart cdfee6f1e9 fix(ci): adjusting cache steps 2024-01-21 16:28:19 -06:00
Timothy Stewart f767c32bf8 fix(ci): adjusting cache steps 2024-01-21 16:09:59 -06:00
Timothy Stewart 5cc46eb360 fix(ci): adjusting cache steps 2024-01-21 16:00:24 -06:00
Timothy Stewart 980622fdbd fix(ci): adjusting cache steps 2024-01-21 15:42:13 -06:00
Timothy Stewart f8e408b3bd fix(ci): adjusting cache steps 2024-01-21 15:40:17 -06:00
Timothy Stewart 0c4bafa70c fix(ci): adjusting cache steps 2024-01-21 15:37:51 -06:00
Timothy Stewart eb7046fb34 fix(ci): adjusting cache steps 2024-01-21 15:35:30 -06:00
Timothy Stewart dfdcff7e11 fix(ci): adjusting cache steps 2024-01-21 15:26:36 -06:00
Timothy Stewart d66e745979 fix(ci): adjusting cache steps 2024-01-21 15:20:26 -06:00
Timothy Stewart c3597a9623 fix(ci): adjusting cache steps 2024-01-21 15:19:52 -06:00
Timothy Stewart 2333e85148 fix(ci): adjusting cache steps 2024-01-21 15:17:04 -06:00
Timothy Stewart 7c1b17a40c fix(ci): adjusting cache steps 2024-01-21 15:14:37 -06:00
Timothy Stewart 4b4922e1b6 fix(ci): adjusting cache steps 2024-01-21 15:11:38 -06:00
Timothy Stewart f07009e0c5 fix(ci): move to macos13 2024-01-21 14:49:06 -06:00
Timothy Stewart 0e233e1d0f fix(ci): move to macos13 2024-01-21 14:35:45 -06:00
Timothy Stewart 22a617734d fix(ci): move to macos13 2024-01-21 14:24:51 -06:00
Timothy Stewart 272e9cde2b fix(ci): move to macos13 2024-01-21 14:24:37 -06:00
Timothy Stewart 03e0d00180 fix(ci): move to macos13 2024-01-21 14:11:24 -06:00
Timothy Stewart 68f8f20cd7 fix(ci): move to macos13 2024-01-21 13:57:12 -06:00
Timothy Stewart 10f545ff30 fix(ci): move to macos13 2024-01-21 13:56:20 -06:00
Timothy Stewart 5b7794c6bf fix(ci): move to macos13 2024-01-21 13:55:16 -06:00
Timothy Stewart 0c640c5a95 fix(ci): move to macos13 2024-01-21 13:53:56 -06:00
Timothy Stewart 9117ec4b7a fix(ci): move to macos13 2024-01-21 13:48:07 -06:00
Timothy Stewart 50d60e6164 fix(ci): move to macos13 2024-01-21 13:45:59 -06:00
Timothy Stewart 3345de29fc fix(ci): move to macos13 2024-01-21 13:44:14 -06:00
Timothy Stewart b7248f89d9 fix(ci): move to macos13 2024-01-21 13:39:56 -06:00
Timothy Stewart 0715ab9440 fix(ci): move to macos13 2024-01-21 13:37:10 -06:00
Timothy Stewart f2b87ec097 fix(ci): move to macos13 2024-01-21 13:20:36 -06:00
Timothy Stewart 3bb8984d7c fix(ci): Add a cache prestep 2024-01-21 13:15:43 -06:00
Timothy Stewart a2d4e91aa5 fix(ci): Add a cache prestep 2024-01-21 13:06:16 -06:00
Timothy Stewart 7cfcd9727c fix(ci): Add a cache prestep 2024-01-21 12:52:28 -06:00
Timothy Stewart 7a8c7eccb6 fix(ci): Add a cache prestep 2024-01-21 12:45:33 -06:00
Timothy Stewart f54eb1bf41 fix(ci): Add a cache prestep 2024-01-21 12:26:18 -06:00
Timothy Stewart 20ea0bc998 fix(ci): Add a cache prestep 2024-01-21 12:18:29 -06:00
Timothy Stewart 867eabcd7e fix(ci): Add a cache prestep 2024-01-21 12:16:21 -06:00
Timothy Stewart 9084c90675 fix(ci): Add a cache prestep 2024-01-21 12:11:54 -06:00
Timothy Stewart fecf7c7fb3 fix(molecule): Reducing cores and memory 2024-01-21 11:10:43 -06:00
Timothy Stewart ac4a6e7c20 fix(molecule): Reducing cores and memory 2024-01-21 10:47:38 -06:00
29 changed files with 165 additions and 547 deletions

@@ -37,11 +37,6 @@ systemd_dir: ""
flannel_iface: ""
#calico_iface: ""
calico_ebpf: ""
calico_cidr: ""
calico_tag: ""
apiserver_endpoint: ""
k3s_token: "NA"
@@ -51,9 +46,6 @@ extra_agent_args: ""
kube_vip_tag_version: ""
kube_vip_cloud_provider_tag_version: ""
kube_vip_lb_ip_range: ""
metal_lb_speaker_tag_version: ""
metal_lb_controller_tag_version: ""

@@ -9,18 +9,3 @@ updates:
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]

@@ -5,22 +5,48 @@ on:
jobs:
molecule:
name: cache
runs-on: self-hosted
runs-on: ubuntu-latest
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
# - name: Cache Ansible
# uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
# id: cache-ansible
# with:
# path: ~/.ansible/collections
# key: ansible-${{ hashFiles('collections/requirements.yml') }}
# restore-keys: |
# ansible-
- name: Install dependencies
run: |
echo "::group::Upgrade pip"
python3 -m pip install --upgrade pip
echo "::endgroup::"
echo "::group::Install Python requirements from requirements.txt"
python3 -m pip install -r requirements.txt
echo "::endgroup::"
# - name: Install ansible dependencies
# if: steps.cache-ansible.outputs.cache-hit != 'true' # only run if false since this is just a cache step
# run: |
# echo "::group::Install Ansible role requirements from collections/requirements.yml"
# ansible-galaxy install -r collections/requirements.yml
# echo "::endgroup::"
- name: Cache Vagrant boxes
id: cache-vagrant
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
@@ -32,6 +58,36 @@ jobs:
restore-keys: |
vagrant-boxes
- name: install apt packages
run: |
wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt update && sudo apt -y install vagrant virtualbox
# - name: Configure Homebrew cache
# uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
# id: cache-homebrew
# with:
# path: |
# ~/Library/Caches/Homebrew
# key: brew-${{ hashFiles('./Brewfile') }}
# restore-keys: brew-
# - name: Update Homebrew
# if: | # only run if false since this is just a cache step
# steps.cache-homebrew.outputs.cache-hit != 'true' || steps.cache-vagrant.outputs.cache-hit != 'true'
# run: |
# brew update --preinstall
# - name: Install Homebrew dependencies
# if: | # only run if false since this is just a cache step
# steps.cache-homebrew.outputs.cache-hit != 'true' || steps.cache-cache-vagrant.outputs.cache-hit != 'true'
# run: |
# env HOMEBREW_NO_AUTO_UPDATE=1 brew bundle --no-upgrade --file ./Brewfile
# vagrant --version
# vboxmanage --version
- name: Download Vagrant boxes for all scenarios
# To save some cache space, all scenarios share the same cache key.
# On the other hand, this means that the cache contents should be

@@ -2,20 +2,11 @@
name: "CI"
on:
pull_request:
types:
- opened
- synchronize
push:
branches:
- master
paths-ignore:
- '**/.gitignore'
- '**/FUNDING.yml'
- '**/host.ini'
- '**/*.md'
- '**/.editorconfig'
- '**/ansible.example.cfg'
- '**/deploy.sh'
- '**/LICENSE'
- '**/reboot.sh'
- '**/reset.sh'
- '**/README.md'
jobs:
pre:
uses: ./.github/workflows/cache.yml

@@ -5,18 +5,18 @@ on:
jobs:
pre-commit-ci:
name: Pre-Commit
runs-on: self-hosted
runs-on: ubuntu-latest
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
@@ -42,12 +42,12 @@ jobs:
ensure-pinned-actions:
name: Ensure SHA Pinned Actions
runs-on: self-hosted
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
with:
allowlist: |
aws-actions/

@@ -5,50 +5,28 @@ on:
jobs:
molecule:
name: Molecule
runs-on: self-hosted
runs-on: ubuntu-latest
strategy:
matrix:
scenario:
- default
- ipv6
- single_node
- calico
- kube-vip
fail-fast: false
env:
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
with:
ref: ${{ github.event.pull_request.head.sha }}
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf || true
# - name: Restore Ansible cache
# uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
# with:
# path: ~/.ansible/collections
# key: ansible-${{ hashFiles('collections/requirements.yml') }}
- name: Configure VirtualBox
run: |-
sudo mkdir -p /etc/vbox
@@ -58,11 +36,34 @@ jobs:
EOF
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies
# - name: Restore Homebrew cache
# uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
# with:
# path: |
# ~/Library/Caches/Homebrew
# key: brew-${{ hashFiles('./Brewfile') }}
# - name: Update Homebrew
# run: |
# brew update --preinstall
# - name: Install Homebrew dependencies
# run: |
# env HOMEBREW_NO_AUTO_UPDATE=1 brew bundle --no-upgrade --file ./Brewfile
# vagrant --version
# vboxmanage --version
- name: install apt packages
run: |
wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt update && sudo apt -y install vagrant virtualbox
- name: Restore vagrant Boxes cache
uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with:
@@ -90,36 +91,14 @@ jobs:
PY_COLORS: 1
ANSIBLE_FORCE_COLOR: 1
# these steps are necessary if not using ephemeral nodes
- name: Delete old Vagrant box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force
- name: Remove all local Vagrant boxes
if: always() # do this even if a step before has failed
run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
- name: Remove all Virtualbox VMs
if: always() # do this even if a step before has failed
run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
- name: Remove all Virtualbox HDs
if: always() # do this even if a step before has failed
run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
- name: Remove all Virtualbox Networks
if: always() # do this even if a step before has failed
run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
- name: Remove Virtualbox network config
if: always() # do this even if a step before has failed
run: sudo rm /etc/vbox/networks.conf || true
- name: Upload log files
if: always() # do this even if a step before has failed
uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # 4.3.0
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
with:
name: logs
path: |
${{ runner.temp }}/logs
overwrite: true
- name: Delete old box versions
if: always() # do this even if a step before has failed
run: vagrant box prune --force

.gitignore

@@ -1,4 +1,3 @@
.env/
*.log
ansible.cfg
kubeconfig

Brewfile

@@ -0,0 +1,5 @@
tap "homebrew/bundle"
tap "hashicorp/tap"
cask "virtualbox"
cask "vagrant"
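Note: this Brewfile backs the Homebrew steps that are commented out in the workflows above. As those commented-out steps show, it would be consumed roughly as follows (illustrative only; in this compare the runners install Vagrant and VirtualBox via apt instead):

    env HOMEBREW_NO_AUTO_UPDATE=1 brew bundle --no-upgrade --file ./Brewfile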

@@ -1,5 +1,5 @@
---
k3s_version: v1.29.0+k3s1
k3s_version: v1.25.16+k3s4
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -10,12 +10,6 @@ system_timezone: "Your/Timezone"
# interface which will be used for flannel
flannel_iface: "eth0"
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0"
calico_ebpf: false # use eBPF dataplane instead of iptables
calico_cidr: "10.52.0.0/16" # calico cluster pod cidr pool
calico_tag: "v3.27.0" # calico version tag
# apiserver_endpoint is virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222"
@@ -26,42 +20,28 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override
# it for each of your hosts, though.
k3s_node_ip: "{{ ansible_facts[(calico_iface | default(flannel_iface))]['ipv4']['address'] }}"
k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
# these arguments are recommended for servers as well as agents:
extra_args: >-
{{ '--flannel-iface=' + flannel_iface if calico_iface is not defined else '' }}
--flannel-iface={{ flannel_iface }}
--node-ip={{ k3s_node_ip }}
# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
# the contents of the if block is also required if using calico
extra_server_args: >-
{{ extra_args }}
{{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
{% if calico_iface is defined %}
--flannel-backend=none
--disable-network-policy
--cluster-cidr={{ calico_cidr | default('10.52.0.0/16') }}
{% endif %}
--tls-san {{ apiserver_endpoint }}
--disable servicelb
--disable traefik
extra_agent_args: >-
{{ extra_args }}
# image tag for kube-vip
kube_vip_tag_version: "v0.6.4"
# tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main"
# kube-vip ip range for load balancer
# (uncomment to use kube-vip for services instead of MetalLB)
# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
kube_vip_tag_version: "v0.5.12"
# metallb type frr or native
metal_lb_type: "native"
@@ -75,8 +55,8 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb
metal_lb_speaker_tag_version: "v0.13.12"
metal_lb_controller_tag_version: "v0.13.12"
metal_lb_speaker_tag_version: "v0.13.9"
metal_lb_controller_tag_version: "v0.13.9"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
@@ -86,9 +66,9 @@ metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
# containers are significantly more resource efficient compared to full VMs.
# containers are significantly more resource efficent compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# I would only really recommend using this if you have partiularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false
# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,

@@ -13,10 +13,6 @@ We have these scenarios:
To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
- **single_node**:
Very similar to the default scenario, but uses only a single node for all cluster functionality.
- **calico**:
The same as single node, but uses calico cni instead of flannel.
- **kube-vip**
The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB
## How to execute
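Note: the scenarios listed above correspond to Molecule scenario directories. Assuming Molecule and its Vagrant driver are installed (for example via the repository's requirements.txt), a single scenario would typically be run with something like:

    molecule test --scenario-name default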

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.62
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

@@ -1,16 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
calico_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.224"
metal_lb_ip_range: "192.168.30.100-192.168.30.109"

@@ -1,14 +1,16 @@
---
dependency:
name: galaxy
options:
requirements-file: collections/requirements.yml
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 1024
cpus: 2
memory: 512
cpus: 1
groups:
- k3s_cluster
- master
@@ -22,9 +24,9 @@ platforms:
ssh.password: "vagrant"
- name: control2
box: generic/debian12
memory: 1024
cpus: 2
box: generic/debian11
memory: 512
cpus: 1
groups:
- k3s_cluster
- master
@@ -34,8 +36,8 @@ platforms:
- name: control3
box: generic/rocky9
memory: 1024
cpus: 2
memory: 512
cpus: 1
groups:
- k3s_cluster
- master
@@ -45,8 +47,8 @@ platforms:
- name: node1
box: generic/ubuntu2204
memory: 1024
cpus: 2
memory: 512
cpus: 1
groups:
- k3s_cluster
- node
@@ -61,8 +63,8 @@ platforms:
- name: node2
box: generic/rocky9
memory: 1024
cpus: 2
memory: 512
cpus: 1
groups:
- k3s_cluster
- node
@@ -73,7 +75,7 @@ platforms:
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
ANSIBLE_VERBOSITY: 3
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml

@@ -1,13 +1,15 @@
---
dependency:
name: galaxy
options:
requirements-file: collections/requirements.yml
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 1024
cpus: 2
memory: 512
cpus: 1
groups:
- k3s_cluster
- master
@@ -22,8 +24,8 @@ platforms:
- name: control2
box: generic/ubuntu2204
memory: 1024
cpus: 2
memory: 512
cpus: 1
groups:
- k3s_cluster
- master
@@ -38,8 +40,8 @@ platforms:
- name: node1
box: generic/ubuntu2204
memory: 1024
cpus: 2
memory: 512
cpus: 1
groups:
- k3s_cluster
- node
@@ -54,7 +56,7 @@ platforms:
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
ANSIBLE_VERBOSITY: 3
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml

@@ -1,49 +0,0 @@
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
groups:
- k3s_cluster
- master
interfaces:
- network_name: private_network
ip: 192.168.30.62
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml
verify: ../resources/verify.yml
inventory:
links:
group_vars: ../../inventory/sample/group_vars
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
# idempotence is not possible with the playbook in its current form.
- verify
# We are repurposing side_effect here to test the reset playbook.
# This is why we do not run it before verify (which tests the cluster),
# but after the verify step.
- side_effect
- cleanup
- destroy

@@ -1,17 +0,0 @@
---
- name: Apply overrides
hosts: all
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
# Use kube-vip instead of MetalLB
kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"

@@ -1,13 +1,15 @@
---
dependency:
name: galaxy
options:
requirements-file: collections/requirements.yml
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 4096
cpus: 4
memory: 512
cpus: 1
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
@@ -22,7 +24,7 @@ platforms:
provisioner:
name: ansible
env:
ANSIBLE_VERBOSITY: 1
ANSIBLE_VERBOSITY: 3
playbooks:
converge: ../resources/converge.yml
side_effect: ../resources/reset.yml

@@ -1,8 +1,8 @@
---
- name: Create k3s-node.service.d directory
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s-node.service.d'
path: '{{ systemd_dir }}/k3s.service.d'
state: directory
owner: root
group: root
@@ -12,7 +12,7 @@
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'

@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip cloud provider manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy kubevip configMap manifest to first master
template:
src: "kubevip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

@@ -29,12 +29,6 @@
- name: Deploy metallb manifest
include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined
- name: Deploy kube-vip manifest
include_tasks: kube-vip.yml
tags: kubevip
when: kube_vip_lb_ip_range is defined
- name: Init cluster inside the transient k3s-init service
command:

@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubevip
namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
range-global: {{ kube_vip_lb_ip_range | join(',') }}

@@ -43,7 +43,7 @@ spec:
- name: vip_ddns
value: "false"
- name: svc_enable
value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}"
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration

@@ -1,114 +0,0 @@
---
- name: Deploy Calico to cluster
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
run_once: true
block:
- name: Create manifests directory on first master
file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
- name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
dest: "/tmp/k3s/tigera-operator.yaml"
owner: root
group: root
mode: 0755
- name: Copy Calico custom resources manifest to first master
ansible.builtin.template:
src: "calico.crs.j2"
dest: /tmp/k3s/custom-resources.yaml
owner: root
group: root
mode: 0755
- name: Deploy or replace Tigera Operator
block:
- name: Deploy Tigera Operator
ansible.builtin.command:
cmd: kubectl create -f /tmp/k3s/tigera-operator.yaml
register: create_operator
changed_when: "'created' in create_operator.stdout"
failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
rescue:
- name: Replace existing Tigera Operator
ansible.builtin.command:
cmd: kubectl replace -f /tmp/k3s/tigera-operator.yaml
register: replace_operator
changed_when: "'replaced' in replace_operator.stdout"
failed_when: "'Error' in replace_operator.stderr"
- name: Wait for Tigera Operator resources
command: >-
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator'
--for=condition=Available=True
--timeout=7s
register: tigera_result
changed_when: false
until: tigera_result is succeeded
retries: 7
delay: 7
with_items:
- {name: tigera-operator, type: deployment}
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Deploy Calico custom resources
block:
- name: Deploy custom resources for Calico
ansible.builtin.command:
cmd: kubectl create -f /tmp/k3s/custom-resources.yaml
register: create_cr
changed_when: "'created' in create_cr.stdout"
failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
rescue:
- name: Apply new Calico custom resource manifest
ansible.builtin.command:
cmd: kubectl apply -f /tmp/k3s/custom-resources.yaml
register: apply_cr
changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
failed_when: "'Error' in apply_cr.stderr"
- name: Wait for Calico system resources to be available
command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace='{{ item.namespace }}'
--selector={{ item.selector }}
--for=condition=Ready
{% else %}
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='{{ item.namespace }}'
--for=condition=Available
{% endif %}
--timeout=7s
register: cr_result
changed_when: false
until: cr_result is succeeded
retries: 30
delay: 7
with_items:
- {name: calico-typha, type: deployment, namespace: calico-system}
- {name: calico-kube-controllers, type: deployment, namespace: calico-system}
- {name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system}
- {name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system}
- {name: calico-apiserver, type: deployment, namespace: calico-apiserver}
loop_control:
label: "{{ item.type }}/{{ item.name }}"
- name: Patch Felix configuration for eBPF mode
ansible.builtin.command:
cmd: >
kubectl patch felixconfiguration default
--type='merge'
--patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
register: patch_result
changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
failed_when: "'Error' in patch_result.stderr"
when: calico_ebpf

@@ -1,13 +1,7 @@
---
- name: Deploy calico
include_tasks: calico.yml
tags: calico
when: calico_iface is defined
- name: Deploy metallb pool
include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined
- name: Remove tmp directory used for manifests
file:

@@ -8,27 +8,6 @@
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Delete outdated metallb replicas
shell: |-
set -o pipefail
REPLICAS=$(k3s kubectl --namespace='metallb-system' get replicasets \
-l 'component=controller,app=metallb' \
-o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
if [ -n "${REPLICAS_SETS}" ] ; then
for REPLICAS in "${REPLICAS_SETS}"
do
k3s kubectl --namespace='metallb-system' \
delete rs "${REPLICAS}"
done
fi
args:
executable: /bin/bash
changed_when: false
run_once: true
with_items: "{{ groups[group_name_master | default('master')] }}"
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"

@@ -1,41 +0,0 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
# Configures Calico networking.
calicoNetwork:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: {{ calico_blockSize | default('26') }}
cidr: {{ calico_cidr | default('10.52.0.0/16') }}
encapsulation: {{ calico_encapsulation | default('VXLANCrossSubnet') }}
natOutgoing: {{ calico_natOutgoing | default('Enabled') }}
nodeSelector: {{ calico_nodeSelector | default('all()') }}
nodeAddressAutodetectionV4:
interface: {{ calico_iface }}
linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}
{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kubernetes-services-endpoint
namespace: tigera-operator
data:
KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
KUBERNETES_SERVICE_PORT: '6443'
{% endif %}

@@ -17,27 +17,21 @@
when:
grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm)
- name: Set detected_distribution to Raspbian
set_fact:
detected_distribution: Raspbian
vars:
allowed_descriptions:
- "[Rr]aspbian.*"
- "Debian.*buster"
- "Debian.*bullseye"
- "Debian.*bookworm"
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))
when: >
raspberry_pi|default(false) and
( ansible_facts.lsb.id|default("") == "Raspbian" or
ansible_facts.lsb.description|default("") is match("[Rr]aspbian.*") )
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm)
- name: Set detected_distribution to Raspbian (ARM64 on Debian Buster)
set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bookworm")
- ansible_facts.lsb.description|default("") is match("Debian.*buster")
- name: Set detected_distribution_major_version
set_fact:
@@ -45,6 +39,14 @@
when:
- detected_distribution | default("") == "Raspbian"
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bullseye)
set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
with_first_found:

@@ -45,16 +45,13 @@
- /var/lib/rancher/k3s
- /var/lib/rancher/
- /var/lib/cni/
- /etc/cni/net.d
- name: Remove K3s http_proxy files
file:
name: "{{ item }}"
state: absent
with_items:
- "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s.service.d"
- "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
- "{{ systemd_dir }}/k3s-node.service.d"
when: proxy_env is defined

@@ -46,14 +46,3 @@
roles:
- role: k3s_server_post
become: true
- name: Storing kubeconfig in the playbook directory
hosts: master
environment: "{{ proxy_env | default({}) }}"
tasks:
- name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
ansible.builtin.fetch:
src: "{{ ansible_user_dir }}/.kube/config"
dest: ./kubeconfig
flat: true
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
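Note: the block removed above fetched the cluster kubeconfig to ./kubeconfig in the playbook directory. Where that file exists, it would typically be used along these lines (illustrative only):

    export KUBECONFIG=./kubeconfig
    kubectl get nodes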