Compare commits

..

2 Commits

Author SHA1 Message Date
Techno Tim
d8145fb6a6 Merge branch 'master' into dependabot/github_actions/actions/upload-artifact-4.3.1 2024-02-06 14:44:12 -06:00
dependabot[bot]
a0f5eed121 chore(deps): bump actions/upload-artifact from 4.3.0 to 4.3.1
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.0 to 4.3.1.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](26f96dfa69...5d5d22a312)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-02-06 02:16:04 +00:00
15 changed files with 25 additions and 76 deletions

View File

@@ -11,7 +11,7 @@ jobs:
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
@@ -23,7 +23,7 @@ jobs:
- name: Cache Vagrant boxes - name: Cache Vagrant boxes
id: cache-vagrant id: cache-vagrant
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0 uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with: with:
lookup-only: true #if it exists, we don't need to restore and can skip the next step lookup-only: true #if it exists, we don't need to restore and can skip the next step
path: | path: |

View File

@@ -11,7 +11,7 @@ jobs:
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
@@ -22,7 +22,7 @@ jobs:
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
- name: Restore Ansible cache - name: Restore Ansible cache
uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0 uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with: with:
path: ~/.ansible/collections path: ~/.ansible/collections
key: ansible-${{ hashFiles('collections/requirements.yml') }} key: ansible-${{ hashFiles('collections/requirements.yml') }}
@@ -38,14 +38,14 @@ jobs:
echo "::endgroup::" echo "::endgroup::"
- name: Run pre-commit - name: Run pre-commit
uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # 3.0.1 uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # 3.0.0
ensure-pinned-actions: ensure-pinned-actions:
name: Ensure SHA Pinned Actions name: Ensure SHA Pinned Actions
runs-on: self-hosted runs-on: self-hosted
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
- name: Ensure SHA pinned actions - name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3 uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
with: with:

View File

@@ -10,7 +10,7 @@ jobs:
matrix: matrix:
scenario: scenario:
- default - default
# - ipv6 - ipv6
- single_node - single_node
- calico - calico
- cilium - cilium
@@ -21,7 +21,7 @@ jobs:
steps: steps:
- name: Check out the codebase - name: Check out the codebase
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2 uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
@@ -65,7 +65,7 @@ jobs:
cache: 'pip' # caching pip dependencies cache: 'pip' # caching pip dependencies
- name: Restore vagrant Boxes cache - name: Restore vagrant Boxes cache
uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0 uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 4.0
with: with:
path: ~/.vagrant.d/boxes path: ~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }} key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}

View File

@@ -96,22 +96,8 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
To copy your `kube config` locally so that you can access your **Kubernetes** cluster run: To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:
```bash ```bash
scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config scp debian@master_ip:~/.kube/config ~/.kube/config
``` ```
If you get a "Permission denied" error for the file, go into the node and temporarily run:
```bash
sudo chmod 777 /etc/rancher/k3s/k3s.yaml
```
Then copy with the scp command and reset the permissions back to:
```bash
sudo chmod 600 /etc/rancher/k3s/k3s.yaml
```
You'll then want to modify the config to point to master IP by running:
```bash
sudo nano ~/.kube/config
```
Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
### 🔨 Testing your cluster ### 🔨 Testing your cluster

View File

@@ -1,5 +1,5 @@
--- ---
k3s_version: v1.29.2+k3s1 k3s_version: v1.29.0+k3s1
# this is the user that has ssh access to these machines # this is the user that has ssh access to these machines
ansible_user: ansibleuser ansible_user: ansibleuser
systemd_dir: /etc/systemd/system systemd_dir: /etc/systemd/system
@@ -13,13 +13,13 @@ flannel_iface: "eth0"
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about # uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0" # calico_iface: "eth0"
calico_ebpf: false # use eBPF dataplane instead of iptables calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: "v3.27.2" # calico version tag calico_tag: "v3.27.0" # calico version tag
# uncomment cilium_iface to use cilium cni instead of flannel or calico # uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel # ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0" # cilium_iface: "eth0"
cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.15.2" # cilium version tag cilium_tag: "v1.14.6" # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui cilium_hubble: true # enable hubble observability relay and ui
# if using calico or cilium, you may specify the cluster pod cidr pool # if using calico or cilium, you may specify the cluster pod cidr pool
@@ -72,7 +72,7 @@ extra_agent_args: >-
{{ extra_args }} {{ extra_args }}
# image tag for kube-vip # image tag for kube-vip
kube_vip_tag_version: "v0.7.2" kube_vip_tag_version: "v0.6.4"
# tag for kube-vip-cloud-provider manifest # tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main" # kube_vip_cloud_provider_tag_version: "main"
@@ -93,8 +93,8 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1" # metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb # image tag for metal lb
metal_lb_speaker_tag_version: "v0.14.3" metal_lb_speaker_tag_version: "v0.13.12"
metal_lb_controller_tag_version: "v0.14.3" metal_lb_controller_tag_version: "v0.13.12"
# metallb ip range for load balancer # metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90" metal_lb_ip_range: "192.168.30.80-192.168.30.90"
@@ -160,10 +160,6 @@ custom_registries_yaml: |
username: yourusername username: yourusername
password: yourpassword password: yourpassword
# On some distros like Diet Pi, there is no dbus installed. dbus required by the default reboot command.
# Uncomment if you need a custom reboot command
# custom_reboot_command: /usr/sbin/shutdown -r now
# Only enable and configure these if you access the internet through a proxy # Only enable and configure these if you access the internet through a proxy
# proxy_env: # proxy_env:
# HTTP_PROXY: "http://proxy.domain.local:3128" # HTTP_PROXY: "http://proxy.domain.local:3128"

View File

@@ -6,5 +6,4 @@
- name: Reboot the nodes (and Wait upto 5 mins max) - name: Reboot the nodes (and Wait upto 5 mins max)
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 300 reboot_timeout: 300

View File

@@ -6,7 +6,7 @@
# #
ansible-compat==4.1.11 ansible-compat==4.1.11
# via molecule # via molecule
ansible-core==2.16.4 ansible-core==2.16.3
# via # via
# -r requirements.in # -r requirements.in
# ansible-compat # ansible-compat
@@ -77,7 +77,7 @@ molecule==6.0.3
# via # via
# -r requirements.in # -r requirements.in
# molecule-plugins # molecule-plugins
molecule-plugins[vagrant]==23.5.3 molecule-plugins[vagrant]==23.5.0
# via -r requirements.in # via -r requirements.in
netaddr==0.10.1 netaddr==0.10.1
# via -r requirements.in # via -r requirements.in
@@ -96,7 +96,7 @@ platformdirs==4.1.0
# via virtualenv # via virtualenv
pluggy==1.3.0 pluggy==1.3.0
# via molecule # via molecule
pre-commit==3.6.2 pre-commit==3.6.0
# via -r requirements.in # via -r requirements.in
pre-commit-hooks==4.5.0 pre-commit-hooks==4.5.0
# via -r requirements.in # via -r requirements.in

View File

@@ -12,7 +12,6 @@
- name: Reboot and wait for node to come back up - name: Reboot and wait for node to come back up
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 3600 reboot_timeout: 3600
- name: Revert changes to Proxmox cluster - name: Revert changes to Proxmox cluster

View File

@@ -10,7 +10,7 @@
- name: Download vip rbac manifest to first master - name: Download vip rbac manifest to first master
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://kube-vip.io/manifests/rbac.yaml" url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml" dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root owner: root
group: root group: root

View File

@@ -48,7 +48,7 @@
k3s kubectl wait {{ item.type }}/{{ item.name }} k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator' --namespace='tigera-operator'
--for=condition=Available=True --for=condition=Available=True
--timeout=30s --timeout=7s
register: tigera_result register: tigera_result
changed_when: false changed_when: false
until: tigera_result is succeeded until: tigera_result is succeeded
@@ -87,7 +87,7 @@
--namespace='{{ item.namespace }}' --namespace='{{ item.namespace }}'
--for=condition=Available --for=condition=Available
{% endif %} {% endif %}
--timeout=30s --timeout=7s
register: cr_result register: cr_result
changed_when: false changed_when: false
until: cr_result is succeeded until: cr_result is succeeded

View File

@@ -185,7 +185,7 @@
--helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }} --helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }}
{% endif %} {% endif %}
environment: environment:
KUBECONFIG: "{{ ansible_user_dir }}/.kube/config" KUBECONFIG: /home/{{ ansible_user }}/.kube/config
register: cilium_install_result register: cilium_install_result
changed_when: cilium_install_result.rc == 0 changed_when: cilium_install_result.rc == 0
when: cilium_installed.rc != 0 or cilium_needs_update when: cilium_installed.rc != 0 or cilium_needs_update
@@ -202,7 +202,7 @@
--namespace=kube-system --namespace=kube-system
--for=condition=Available --for=condition=Available
{% endif %} {% endif %}
--timeout=30s --timeout=7s
register: cr_result register: cr_result
changed_when: false changed_when: false
until: cr_result is succeeded until: cr_result is succeeded

View File

@@ -2,5 +2,4 @@
- name: Reboot server - name: Reboot server
become: true become: true
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot server listen: reboot server

View File

@@ -1,5 +1,4 @@
--- ---
- name: Reboot - name: Reboot
reboot: reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot listen: reboot

View File

@@ -1,27 +1,7 @@
--- ---
- name: Test for cmdline path
stat:
path: /boot/firmware/cmdline.txt
register: boot_cmdline_path
failed_when: false
changed_when: false
- name: Set cmdline path based on Debian version and command result
set_fact:
cmdline_path: >-
{{
(
boot_cmdline_path.stat.exists and
ansible_facts.lsb.description | default('') is match('Debian.*(?!(bookworm|sid))')
) | ternary(
'/boot/firmware/cmdline.txt',
'/boot/cmdline.txt'
)
}}
- name: Activating cgroup support - name: Activating cgroup support
lineinfile: lineinfile:
path: "{{ cmdline_path }}" path: /boot/cmdline.txt
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$' regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory' line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
backrefs: true backrefs: true

View File

@@ -1,13 +1,4 @@
--- ---
- name: Pre tasks
hosts: all
pre_tasks:
- name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible)
assert:
that: "ansible_version.full is version_compare('2.11', '>=')"
msg: >
"Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/"
- name: Prepare Proxmox cluster - name: Prepare Proxmox cluster
hosts: proxmox hosts: proxmox
gather_facts: true gather_facts: true