Compare commits

..

6 Commits

Author SHA1 Message Date
Daniel San Pedro
d325eae726 Add LSB release checks. This is untested atm. 2024-02-26 23:39:07 -05:00
Techno Tim
25d696c762 Merge branch 'master' into raspbian-cgroups-fix 2024-02-26 22:10:04 -06:00
Techno Tim
c057a436a1 Merge branch 'master' into raspbian-cgroups-fix 2024-02-26 21:38:29 -06:00
Daniel SP
0919571744 Merge branch 'master' into raspbian-cgroups-fix 2024-02-24 21:26:40 -05:00
Daniel San Pedro
7db9f5bd9b Add check and variable so that we write to the correct file 2024-02-24 21:25:55 -05:00
Daniel San Pedro
842bfd8de9 Fix cgroups cmdline path 2024-02-24 16:53:51 -05:00
19 changed files with 41 additions and 100 deletions

View File

@@ -9,17 +9,12 @@ set -euo pipefail
GIT_ROOT=$(git rev-parse --show-toplevel) GIT_ROOT=$(git rev-parse --show-toplevel)
PROVIDER=virtualbox PROVIDER=virtualbox
yq --version # Read all boxes for all platforms from the "molecule.yml" files
all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
# Define the path to the molecule.yml files yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
MOLECULE_YML_PATH="${GIT_ROOT}/molecule/*/molecule.yml" grep --invert-match --regexp=--- | # Filter out file separators
sort |
# Extract and sort unique boxes from all molecule.yml files uniq)
all_boxes=$(for file in $MOLECULE_YML_PATH; do
yq eval '.platforms[].box' "$file"
done | sort -u)
echo all_boxes: "$all_boxes"
# Read the boxes that are currently present on the system (for the current provider) # Read the boxes that are currently present on the system (for the current provider)
present_boxes=$( present_boxes=$(

View File

@@ -11,19 +11,19 @@ jobs:
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }} - name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1 uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
- name: Cache Vagrant boxes - name: Cache Vagrant boxes
id: cache-vagrant id: cache-vagrant
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2 uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with: with:
lookup-only: true #if it exists, we don't need to restore and can skip the next step lookup-only: true #if it exists, we don't need to restore and can skip the next step
path: | path: |

View File

@@ -11,18 +11,18 @@ jobs:
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ env.PYTHON_VERSION }} - name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1 uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
- name: Restore Ansible cache - name: Restore Ansible cache
uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2 uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with: with:
path: ~/.ansible/collections path: ~/.ansible/collections
key: ansible-${{ hashFiles('collections/requirements.yml') }} key: ansible-${{ hashFiles('collections/requirements.yml') }}
@@ -45,9 +45,9 @@ jobs:
runs-on: self-hosted runs-on: self-hosted
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
- name: Ensure SHA pinned actions - name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@b88cd0aad2c36a63e42c71f81cb1958fed95ac87 # 3.0.10 uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
with: with:
allowlist: | allowlist: |
aws-actions/ aws-actions/

View File

@@ -10,7 +10,7 @@ jobs:
matrix: matrix:
scenario: scenario:
- default - default
# - ipv6 - ipv6
- single_node - single_node
- calico - calico
- cilium - cilium
@@ -21,7 +21,7 @@ jobs:
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
@@ -59,13 +59,13 @@ jobs:
EOF EOF
- name: Set up Python ${{ env.PYTHON_VERSION }} - name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1 uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
- name: Restore vagrant Boxes cache - name: Restore vagrant Boxes cache
uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2 uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with: with:
path: ~/.vagrant.d/boxes path: ~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }} key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
@@ -118,7 +118,7 @@ jobs:
- name: Upload log files - name: Upload log files
if: always() # do this even if a step before has failed if: always() # do this even if a step before has failed
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # 4.3.4 uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # 4.3.1
with: with:
name: logs name: logs
path: | path: |

View File

@@ -96,22 +96,8 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
To copy your `kube config` locally so that you can access your **Kubernetes** cluster run: To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:
```bash ```bash
scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config scp debian@master_ip:~/.kube/config ~/.kube/config
``` ```
If you get a "Permission denied" error for the file, go into the node and temporarily run:
```bash
sudo chmod 777 /etc/rancher/k3s/k3s.yaml
```
Then copy with the scp command and reset the permissions back to:
```bash
sudo chmod 600 /etc/rancher/k3s/k3s.yaml
```
You'll then want to modify the config to point to master IP by running:
```bash
sudo nano ~/.kube/config
```
Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
### 🔨 Testing your cluster ### 🔨 Testing your cluster

View File

@@ -1,5 +1,5 @@
--- ---
k3s_version: v1.30.2+k3s2 k3s_version: v1.29.0+k3s1
# this is the user that has ssh access to these machines # this is the user that has ssh access to these machines
ansible_user: ansibleuser ansible_user: ansibleuser
systemd_dir: /etc/systemd/system systemd_dir: /etc/systemd/system
@@ -13,13 +13,13 @@ flannel_iface: "eth0"
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about # uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0" # calico_iface: "eth0"
calico_ebpf: false # use eBPF dataplane instead of iptables calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: "v3.28.0" # calico version tag calico_tag: "v3.27.0" # calico version tag
# uncomment cilium_iface to use cilium cni instead of flannel or calico # uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel # ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0" # cilium_iface: "eth0"
cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.16.0" # cilium version tag cilium_tag: "v1.14.6" # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui cilium_hubble: true # enable hubble observability relay and ui
# if using calico or cilium, you may specify the cluster pod cidr pool # if using calico or cilium, you may specify the cluster pod cidr pool
@@ -72,7 +72,7 @@ extra_agent_args: >-
{{ extra_args }} {{ extra_args }}
# image tag for kube-vip # image tag for kube-vip
kube_vip_tag_version: "v0.8.2" kube_vip_tag_version: "v0.6.4"
# tag for kube-vip-cloud-provider manifest # tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main" # kube_vip_cloud_provider_tag_version: "main"
@@ -93,8 +93,8 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1" # metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb # image tag for metal lb
metal_lb_speaker_tag_version: "v0.14.8" metal_lb_speaker_tag_version: "v0.13.12"
metal_lb_controller_tag_version: "v0.14.8" metal_lb_controller_tag_version: "v0.13.12"
# metallb ip range for load balancer # metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90" metal_lb_ip_range: "192.168.30.80-192.168.30.90"
@@ -160,10 +160,6 @@ custom_registries_yaml: |
username: yourusername username: yourusername
password: yourpassword password: yourpassword
# On some distros like Diet Pi, there is no dbus installed. dbus required by the default reboot command.
# Uncomment if you need a custom reboot command
# custom_reboot_command: /usr/sbin/shutdown -r now
# Only enable and configure these if you access the internet through a proxy # Only enable and configure these if you access the internet through a proxy
# proxy_env: # proxy_env:
# HTTP_PROXY: "http://proxy.domain.local:3128" # HTTP_PROXY: "http://proxy.domain.local:3128"

View File

@@ -6,5 +6,4 @@
- name: Reboot the nodes (and Wait upto 5 mins max) - name: Reboot the nodes (and Wait upto 5 mins max)
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 300 reboot_timeout: 300

View File

@@ -6,7 +6,7 @@
# #
ansible-compat==4.1.11 ansible-compat==4.1.11
# via molecule # via molecule
ansible-core==2.17.2 ansible-core==2.16.4
# via # via
# -r requirements.in # -r requirements.in
# ansible-compat # ansible-compat
@@ -96,9 +96,9 @@ platformdirs==4.1.0
# via virtualenv # via virtualenv
pluggy==1.3.0 pluggy==1.3.0
# via molecule # via molecule
pre-commit==3.8.0 pre-commit==3.6.2
# via -r requirements.in # via -r requirements.in
pre-commit-hooks==4.6.0 pre-commit-hooks==4.5.0
# via -r requirements.in # via -r requirements.in
pyasn1==0.5.1 pyasn1==0.5.1
# via # via

View File

@@ -12,7 +12,6 @@
- name: Reboot and wait for node to come back up - name: Reboot and wait for node to come back up
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 3600 reboot_timeout: 3600
- name: Revert changes to Proxmox cluster - name: Revert changes to Proxmox cluster

View File

@@ -4,9 +4,6 @@
# will determine the right interface automatically at runtime. # will determine the right interface automatically at runtime.
kube_vip_iface: null kube_vip_iface: null
# Enables ARP broadcasts from Leader
kube_vip_arp: true
# Name of the master group # Name of the master group
group_name_master: master group_name_master: master

View File

@@ -10,7 +10,7 @@
- name: Download vip rbac manifest to first master - name: Download vip rbac manifest to first master
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://kube-vip.io/manifests/rbac.yaml" url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml" dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root owner: root
group: root group: root

View File

@@ -27,7 +27,7 @@ spec:
- manager - manager
env: env:
- name: vip_arp - name: vip_arp
value: "{{ 'true' if kube_vip_arp | bool else 'false' }}" value: "true"
- name: port - name: port
value: "6443" value: "6443"
{% if kube_vip_iface %} {% if kube_vip_iface %}

View File

@@ -48,7 +48,7 @@
k3s kubectl wait {{ item.type }}/{{ item.name }} k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator' --namespace='tigera-operator'
--for=condition=Available=True --for=condition=Available=True
--timeout=30s --timeout=7s
register: tigera_result register: tigera_result
changed_when: false changed_when: false
until: tigera_result is succeeded until: tigera_result is succeeded
@@ -87,7 +87,7 @@
--namespace='{{ item.namespace }}' --namespace='{{ item.namespace }}'
--for=condition=Available --for=condition=Available
{% endif %} {% endif %}
--timeout=30s --timeout=7s
register: cr_result register: cr_result
changed_when: false changed_when: false
until: cr_result is succeeded until: cr_result is succeeded

View File

@@ -185,7 +185,7 @@
--helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }} --helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }}
{% endif %} {% endif %}
environment: environment:
KUBECONFIG: "{{ ansible_user_dir }}/.kube/config" KUBECONFIG: /home/{{ ansible_user }}/.kube/config
register: cilium_install_result register: cilium_install_result
changed_when: cilium_install_result.rc == 0 changed_when: cilium_install_result.rc == 0
when: cilium_installed.rc != 0 or cilium_needs_update when: cilium_installed.rc != 0 or cilium_needs_update
@@ -202,7 +202,7 @@
--namespace=kube-system --namespace=kube-system
--for=condition=Available --for=condition=Available
{% endif %} {% endif %}
--timeout=30s --timeout=7s
register: cr_result register: cr_result
changed_when: false changed_when: false
until: cr_result is succeeded until: cr_result is succeeded

View File

@@ -83,23 +83,9 @@
loop_control: loop_control:
label: "{{ item.description }}" label: "{{ item.description }}"
- name: Set metallb webhook service name
set_fact:
metallb_webhook_service_name: >-
{{
(
(metal_lb_controller_tag_version | regex_replace('^v', ''))
is
version('0.14.4', '<', version_type='semver')
) | ternary(
'webhook-service',
'metallb-webhook-service'
)
}}
- name: Test metallb-system webhook-service endpoint - name: Test metallb-system webhook-service endpoint
command: >- command: >-
k3s kubectl -n metallb-system get endpoints {{ metallb_webhook_service_name }} k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false changed_when: false
with_items: "{{ groups[group_name_master | default('master')] }}" with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true run_once: true

View File

@@ -25,10 +25,5 @@ kind: CiliumLoadBalancerIPPool
metadata: metadata:
name: "01-lb-pool" name: "01-lb-pool"
spec: spec:
blocks: cidrs:
{% if "/" in cilium_bgp_lb_cidr %} - cidr: "{{ cilium_bgp_lb_cidr }}"
- cidr: {{ cilium_bgp_lb_cidr }}
{% else %}
- start: {{ cilium_bgp_lb_cidr.split('-')[0] }}
stop: {{ cilium_bgp_lb_cidr.split('-')[1] }}
{% endif %}

View File

@@ -2,5 +2,4 @@
- name: Reboot server - name: Reboot server
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot server listen: reboot server

View File

@@ -1,5 +1,4 @@
--- ---
- name: Reboot - name: Reboot
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot listen: reboot

View File

@@ -1,23 +1,13 @@
--- ---
- name: Test for cmdline path - name: Test for cmdline path
stat: command: grep -E "console=|rootfstype" /boot/cmdline.txt
path: /boot/firmware/cmdline.txt
register: boot_cmdline_path register: boot_cmdline_path
failed_when: false failed_when: false
changed_when: false changed_when: false
- name: Set cmdline path based on Debian version and command result - name: Set cmdline path based on Debian version and boot_cmdline_path result
set_fact: set_fact:
cmdline_path: >- cmdline_path: "{{ (boot_cmdline_path.rc == 0 and ansible_facts['distribution'] == 'Debian' and ansible_facts['distribution_version'] is version('bookworm', '<')) | ternary('/boot/cmdline.txt', '/boot/firmware/cmdline.txt') }}"
{{
(
boot_cmdline_path.stat.exists and
ansible_facts.lsb.description | default('') is match('Debian.*(?!(bookworm|sid))')
) | ternary(
'/boot/firmware/cmdline.txt',
'/boot/cmdline.txt'
)
}}
- name: Activating cgroup support - name: Activating cgroup support
lineinfile: lineinfile: