Mirror of https://github.com/techno-tim/k3s-ansible.git (synced 2025-12-28 19:52:40 +01:00)
Compare commits: v1.30.2+k3...f1475136c3 (2 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | f1475136c3 |  |
|  | 0b94640930 |  |
.github/download-boxes.sh (vendored, 17 lines changed)
```diff
@@ -9,17 +9,12 @@ set -euo pipefail
 GIT_ROOT=$(git rev-parse --show-toplevel)
 
 PROVIDER=virtualbox
 
-yq --version
-
-# Read all boxes for all platforms from the "molecule.yml" files
-all_boxes=$(cat "${GIT_ROOT}"/molecule/*/molecule.yml |
-	yq -r '.platforms[].box' | # Read the "box" property of each node under "platforms"
-	grep --invert-match --regexp=--- | # Filter out file separators
-	sort |
-	uniq)
+# Define the path to the molecule.yml files
+MOLECULE_YML_PATH="${GIT_ROOT}/molecule/*/molecule.yml"
+
+# Extract and sort unique boxes from all molecule.yml files
+all_boxes=$(for file in $MOLECULE_YML_PATH; do
+  yq eval '.platforms[].box' "$file"
+done | sort -u)
+
+echo all_boxes: "$all_boxes"
 
 # Read the boxes that are currently present on the system (for the current provider)
 present_boxes=$(
```
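Both variants collect every `box` value across the molecule scenarios, deduplicated. Below is a runnable sketch of the two approaches side by side (a sketch assuming yq v4 and this repo's molecule/ layout): the `grep --invert-match --regexp=---` step exists because yq prints `---` separators when fed one concatenated multi-document stream.

```bash
#!/usr/bin/env bash
set -euo pipefail

GIT_ROOT=$(git rev-parse --show-toplevel)

# Tag-side variant: one yq invocation over a concatenated stream; the "---"
# document separators yq emits are filtered back out.
cat "${GIT_ROOT}"/molecule/*/molecule.yml |
	yq -r '.platforms[].box' |
	grep --invert-match --regexp=--- |
	sort |
	uniq

# f1475136c3-side variant: one yq invocation per file, so no separators
# appear, and sort -u folds the sort | uniq pair into one step.
for file in "${GIT_ROOT}"/molecule/*/molecule.yml; do
	yq eval '.platforms[].box' "$file"
done | sort -u
```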
.github/workflows/cache.yml (vendored, 6 lines changed)
```diff
@@ -11,19 +11,19 @@ jobs:
 
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
         with:
           ref: ${{ github.event.pull_request.head.sha }}
 
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
 
       - name: Cache Vagrant boxes
         id: cache-vagrant
-        uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2
+        uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # 4.0
         with:
           lookup-only: true #if it exists, we don't need to restore and can skip the next step
           path: |
```
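All of these `uses:` references are pinned to a full commit SHA with the release version kept in a trailing comment, and the hunks above move each pin between releases. A hedged sketch of resolving a release tag to a pinnable SHA (the tag name here is illustrative):

```bash
# Show the SHA(s) behind an action's release tag. For an annotated tag the
# peeled "^{}" line is the commit to pin; for a lightweight tag the first
# column is already the commit SHA.
git ls-remote --tags https://github.com/actions/checkout 'v4.1.7*'
```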
.github/workflows/lint.yml (vendored, 10 lines changed)
```diff
@@ -11,18 +11,18 @@ jobs:
 
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
         with:
           ref: ${{ github.event.pull_request.head.sha }}
 
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
 
       - name: Restore Ansible cache
-        uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2
+        uses: actions/cache/restore@ab5e6d0c87105b4c9c2047343972218f562e4319 # 4.0
         with:
           path: ~/.ansible/collections
           key: ansible-${{ hashFiles('collections/requirements.yml') }}
@@ -45,9 +45,9 @@ jobs:
     runs-on: self-hosted
     steps:
       - name: Checkout code
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
       - name: Ensure SHA pinned actions
-        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@b88cd0aad2c36a63e42c71f81cb1958fed95ac87 # 3.0.10
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
         with:
           allowlist: |
             aws-actions/
```
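The second hunk touches the job that enforces the pinning policy itself: `zgosalvez/github-actions-ensure-sha-pinned-actions` fails the run when any workflow references an action by tag or branch instead of a full commit SHA, except for the `aws-actions/` allowlist. A rough local approximation of that check (grep heuristics only, not the action's actual parser):

```bash
# Print "uses:" lines whose ref is not a 40-hex-char commit SHA, skipping
# the allowlisted aws-actions/ namespace. No output means everything is pinned.
grep -rn 'uses:' .github/workflows/ \
  | grep -v 'aws-actions/' \
  | grep -vE '@[0-9a-f]{40}\b'
```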
.github/workflows/test.yml (vendored, 10 lines changed)
```diff
@@ -10,7 +10,7 @@ jobs:
       matrix:
         scenario:
           - default
-          # - ipv6
+          - ipv6
           - single_node
           - calico
           - cilium
@@ -21,7 +21,7 @@ jobs:
 
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
         with:
           ref: ${{ github.event.pull_request.head.sha }}
 
@@ -59,13 +59,13 @@ jobs:
          EOF
 
       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 5.1.1
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
 
       - name: Restore vagrant Boxes cache
-        uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0.2
+        uses: actions/cache/restore@ab5e6d0c87105b4c9c2047343972218f562e4319 # 4.0
         with:
           path: ~/.vagrant.d/boxes
           key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
@@ -118,7 +118,7 @@ jobs:
 
       - name: Upload log files
         if: always() # do this even if a step before has failed
-        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # 4.3.4
+        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # 4.3.1
         with:
           name: logs
           path: |
```
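The matrix fans out one CI job per molecule scenario, so the `- ipv6` entry being commented out on the tag side removes that job from the fan-out entirely. One matrix entry corresponds to a single scenario run locally (a sketch assuming molecule and its vagrant driver are installed; scenario names map to `molecule/<name>/`):

```bash
# Run one scenario from the matrix, e.g. the single_node entry.
molecule test --scenario-name single_node
```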
README.md (16 lines changed)
````diff
@@ -96,22 +96,8 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
 To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:
 
 ```bash
-scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
+scp debian@master_ip:~/.kube/config ~/.kube/config
 ```
-
-If you get file Permission denied, go into the node and temporarily run:
-
-```bash
-sudo chmod 777 /etc/rancher/k3s/k3s.yaml
-```
-
-Then copy with the scp command and reset the permissions back to:
-
-```bash
-sudo chmod 600 /etc/rancher/k3s/k3s.yaml
-```
-
-You'll then want to modify the config to point to master IP by running:
-
-```bash
-sudo nano ~/.kube/config
-```
-
-Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
 
 ### 🔨 Testing your cluster
````
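The tag-side instructions spread the copy across several manual steps; here is one hedged end-to-end sketch of that same procedure (the `debian` user and the 192.168.1.222 master IP are the README's own examples, and `sed` stands in for the manual nano edit):

```bash
MASTER_IP=192.168.1.222

# Copy the kubeconfig off the master node. If this fails with
# "Permission denied", the README has you temporarily loosen the mode of
# /etc/rancher/k3s/k3s.yaml on the node and restore 600 afterwards.
scp "debian@${MASTER_IP}:/etc/rancher/k3s/k3s.yaml" ~/.kube/config
chmod 600 ~/.kube/config

# Repoint the server entry from the node-local loopback to the master.
sed -i "s#https://127.0.0.1:6443#https://${MASTER_IP}:6443#" ~/.kube/config
```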
```diff
@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.30.2+k3s2
+k3s_version: v1.29.0+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
@@ -13,13 +13,13 @@ flannel_iface: "eth0"
 # uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
 # calico_iface: "eth0"
 calico_ebpf: false # use eBPF dataplane instead of iptables
-calico_tag: "v3.28.0" # calico version tag
+calico_tag: "v3.27.2" # calico version tag
 
 # uncomment cilium_iface to use cilium cni instead of flannel or calico
 # ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
 # cilium_iface: "eth0"
 cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
-cilium_tag: "v1.16.0" # cilium version tag
+cilium_tag: "v1.15.1" # cilium version tag
 cilium_hubble: true # enable hubble observability relay and ui
 
 # if using calico or cilium, you may specify the cluster pod cidr pool
@@ -72,7 +72,7 @@ extra_agent_args: >-
   {{ extra_args }}
 
 # image tag for kube-vip
-kube_vip_tag_version: "v0.8.2"
+kube_vip_tag_version: "v0.7.1"
 
 # tag for kube-vip-cloud-provider manifest
 # kube_vip_cloud_provider_tag_version: "main"
@@ -93,8 +93,8 @@ metal_lb_mode: "layer2"
 # metal_lb_bgp_peer_address: "192.168.30.1"
 
 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.14.8"
-metal_lb_controller_tag_version: "v0.14.8"
+metal_lb_speaker_tag_version: "v0.14.3"
+metal_lb_controller_tag_version: "v0.14.3"
 
 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"
```
```diff
@@ -6,7 +6,7 @@
 #
 ansible-compat==4.1.11
     # via molecule
-ansible-core==2.17.2
+ansible-core==2.16.4
     # via
     #   -r requirements.in
     #   ansible-compat
@@ -96,9 +96,9 @@ platformdirs==4.1.0
     # via virtualenv
 pluggy==1.3.0
     # via molecule
-pre-commit==3.8.0
+pre-commit==3.6.2
     # via -r requirements.in
-pre-commit-hooks==4.6.0
+pre-commit-hooks==4.5.0
     # via -r requirements.in
 pyasn1==0.5.1
     # via
```
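The `# via` annotations mark this file as pip-compile output, so version moves like these normally come from re-resolving the lock file rather than editing pins by hand. A sketch, assuming the `requirements.in` referenced by the annotations sits next to the lock file:

```bash
pip install pip-tools
# Re-resolve requirements.in into the pinned requirements.txt shown above.
pip-compile --output-file=requirements.txt requirements.in
```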
```diff
@@ -4,9 +4,6 @@
 # will determine the right interface automatically at runtime.
 kube_vip_iface: null
 
-# Enables ARP broadcasts from Leader
-kube_vip_arp: true
-
 # Name of the master group
 group_name_master: master
 
```
```diff
@@ -27,7 +27,7 @@ spec:
         - manager
         env:
           - name: vip_arp
-            value: "{{ 'true' if kube_vip_arp | bool else 'false' }}"
+            value: "true"
           - name: port
             value: "6443"
 {% if kube_vip_iface %}
```
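On the tag side, the manifest's `vip_arp` env var is driven by the `kube_vip_arp` default removed in the previous hunk, while f1475136c3 hardcodes `"true"`. A small sketch of what the Jinja expression `{{ 'true' if kube_vip_arp | bool else 'false' }}` evaluates to (the input list mimics a few values Ansible's `bool` filter treats as truthy):

```bash
for kube_vip_arp in true True yes 1 false no; do
  case "$kube_vip_arp" in
    true|True|yes|1) vip_arp=true ;;   # truthy per the bool filter
    *)               vip_arp=false ;;  # everything else renders "false"
  esac
  printf '%-5s -> vip_arp: "%s"\n' "$kube_vip_arp" "$vip_arp"
done
```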
```diff
@@ -48,7 +48,7 @@
       k3s kubectl wait {{ item.type }}/{{ item.name }}
       --namespace='tigera-operator'
       --for=condition=Available=True
-      --timeout=30s
+      --timeout=7s
   register: tigera_result
   changed_when: false
   until: tigera_result is succeeded
@@ -87,7 +87,7 @@
       --namespace='{{ item.namespace }}'
       --for=condition=Available
       {% endif %}
-      --timeout=30s
+      --timeout=7s
   register: cr_result
   changed_when: false
   until: cr_result is succeeded
```
```diff
@@ -202,7 +202,7 @@
       --namespace=kube-system
       --for=condition=Available
       {% endif %}
-      --timeout=30s
+      --timeout=7s
   register: cr_result
   changed_when: false
   until: cr_result is succeeded
```
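All three hunks tune the same pattern: a bounded `kubectl wait`, retried through Ansible's `until:` until it reports success, with the per-attempt timeout moving between 30s and 7s. A shell equivalent of one such loop (the deployment name is an assumption for illustration, since the loop items and the tasks' `retries`/`delay` values are not part of this compare):

```bash
# Re-issue the bounded wait until the operator reports Available.
until k3s kubectl wait deployment/tigera-operator \
    --namespace='tigera-operator' \
    --for=condition=Available=True \
    --timeout=30s; do
  sleep 5  # pause between attempts
done
```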
```diff
@@ -83,23 +83,9 @@
   loop_control:
     label: "{{ item.description }}"
 
-- name: Set metallb webhook service name
-  set_fact:
-    metallb_webhook_service_name: >-
-      {{
-        (
-          (metal_lb_controller_tag_version | regex_replace('^v', ''))
-          is
-          version('0.14.4', '<', version_type='semver')
-        ) | ternary(
-          'webhook-service',
-          'metallb-webhook-service'
-        )
-      }}
-
 - name: Test metallb-system webhook-service endpoint
   command: >-
-    k3s kubectl -n metallb-system get endpoints {{ metallb_webhook_service_name }}
+    k3s kubectl -n metallb-system get endpoints webhook-service
   changed_when: false
   with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
```
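The removed `set_fact` picks the webhook Service name by comparing the MetalLB controller tag against 0.14.4: older releases get `webhook-service`, newer ones `metallb-webhook-service`. The same decision in shell, using `sort -V` for the semver comparison (variable name per this diff):

```bash
metal_lb_controller_tag_version="v0.14.8"
ver="${metal_lb_controller_tag_version#v}"  # strip the leading v, as the task does

# ver < 0.14.4 exactly when ver sorts first and is not equal to 0.14.4.
if [ "$(printf '%s\n' "$ver" 0.14.4 | sort -V | head -n1)" = "$ver" ] \
   && [ "$ver" != 0.14.4 ]; then
  metallb_webhook_service_name=webhook-service           # < 0.14.4
else
  metallb_webhook_service_name=metallb-webhook-service   # >= 0.14.4
fi
echo "$metallb_webhook_service_name"
```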
```diff
@@ -25,10 +25,5 @@ kind: CiliumLoadBalancerIPPool
 metadata:
   name: "01-lb-pool"
 spec:
-  blocks:
-{% if "/" in cilium_bgp_lb_cidr %}
-  - cidr: {{ cilium_bgp_lb_cidr }}
-{% else %}
-  - start: {{ cilium_bgp_lb_cidr.split('-')[0] }}
-    stop: {{ cilium_bgp_lb_cidr.split('-')[1] }}
-{% endif %}
+  cidrs:
+  - cidr: "{{ cilium_bgp_lb_cidr }}"
```
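The tag-side template accepts `cilium_bgp_lb_cidr` either as a CIDR or as a `start-stop` range and emits the matching `blocks:` entry; the f1475136c3 side only handles the CIDR form. A sketch of the branch the template takes for each input shape:

```bash
# Emit the YAML list entry the tag-side template would render for a given
# cilium_bgp_lb_cidr value (a CIDR if it contains "/", else a dash range).
render_pool_block() {
  local value=$1
  if [[ "$value" == */* ]]; then
    printf '  - cidr: %s\n' "$value"
  else
    printf '  - start: %s\n    stop: %s\n' "${value%-*}" "${value#*-}"
  fi
}

render_pool_block "192.168.30.0/24"
render_pool_block "192.168.30.80-192.168.30.90"
```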