Mirror of https://github.com/techno-tim/k3s-ansible.git, synced 2025-12-25 18:23:05 +01:00
Compare commits
22 Commits
v1.26.12+k3s1 ... master
| SHA1 |
|---|
| f1277d4e8d |
| bcd37a6904 |
| 8dd3ffc825 |
| f6ba208b5c |
| a22d8f7aaf |
| 05fb6b566d |
| 3aeb7d69ea |
| 61bf3971ef |
| 3f06a11c8d |
| 3888a29bb1 |
| 98ef696f31 |
| de26a79a4c |
| ab7ca9b551 |
| c5f71c9e2e |
| 0f23e7e258 |
| 121061d875 |
| db53f595fd |
| 7b6b24ce4d |
| a5728da35e |
| cda7c92203 |
| d910b83bf3 |
| 101313f880 |
.github/ISSUE_TEMPLATE.md (vendored, 8 changed lines)

@@ -37,6 +37,11 @@ systemd_dir: ""
 flannel_iface: ""
+
+#calico_iface: ""
+calico_ebpf: ""
+calico_cidr: ""
+calico_tag: ""

 apiserver_endpoint: ""

 k3s_token: "NA"

@@ -46,6 +51,9 @@ extra_agent_args: ""
 kube_vip_tag_version: ""
+
+kube_vip_cloud_provider_tag_version: ""
+kube_vip_lb_ip_range: ""

 metal_lb_speaker_tag_version: ""
 metal_lb_controller_tag_version: ""
.github/dependabot.yml (vendored, 15 changed lines)

@@ -9,3 +9,18 @@ updates:
     ignore:
       - dependency-name: "*"
         update-types: ["version-update:semver-major"]
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    rebase-strategy: "auto"
+
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    rebase-strategy: "auto"
+    ignore:
+      - dependency-name: "*"
+        update-types: ["version-update:semver-major"]
.github/workflows/cache.yml (vendored, 4 changed lines)

@@ -11,12 +11,12 @@ jobs:
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
         with:
           ref: ${{ github.event.pull_request.head.sha }}

       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies
.github/workflows/ci.yml (vendored, 17 changed lines)

@@ -2,11 +2,20 @@
name: "CI"
on:
  pull_request:
  push:
    branches:
      - master
  types:
    - opened
    - synchronize
  paths-ignore:
    - '**/README.md'
    - '**/.gitignore'
    - '**/FUNDING.yml'
    - '**/host.ini'
    - '**/*.md'
    - '**/.editorconfig'
    - '**/ansible.example.cfg'
    - '**/deploy.sh'
    - '**/LICENSE'
    - '**/reboot.sh'
    - '**/reset.sh'
jobs:
  pre:
    uses: ./.github/workflows/cache.yml
.github/workflows/lint.yml (vendored, 8 changed lines)

@@ -11,12 +11,12 @@ jobs:
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
         with:
           ref: ${{ github.event.pull_request.head.sha }}

       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies

@@ -45,9 +45,9 @@ jobs:
     runs-on: self-hosted
     steps:
       - name: Checkout code
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
       - name: Ensure SHA pinned actions
-        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@af2eb3226618e2494e3d9084f515ad6dcf16e229 # 2.0.1
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
         with:
           allowlist: |
             aws-actions/
.github/workflows/test.yml (vendored, 63 changed lines)

@@ -12,16 +12,43 @@ jobs:
         - default
         - ipv6
         - single_node
+        - calico
+        - kube-vip
       fail-fast: false
     env:
       PYTHON_VERSION: "3.11"

     steps:
       - name: Check out the codebase
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v3 2.5.0
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3 4.1.1
         with:
           ref: ${{ github.event.pull_request.head.sha }}

+      # these steps are necessary if not using ephemeral nodes
+      - name: Delete old Vagrant box versions
+        if: always() # do this even if a step before has failed
+        run: vagrant box prune --force
+
+      - name: Remove all local Vagrant boxes
+        if: always() # do this even if a step before has failed
+        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
+
+      - name: Remove all Virtualbox VMs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
+
+      - name: Remove all Virtualbox HDs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
+
+      - name: Remove all Virtualbox Networks
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
+
+      - name: Remove Virtualbox network config
+        if: always() # do this even if a step before has failed
+        run: sudo rm /etc/vbox/networks.conf || true
+
       - name: Configure VirtualBox
         run: |-
           sudo mkdir -p /etc/vbox

@@ -31,7 +58,7 @@
           EOF

       - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@75f3110429a8c05be0e1bf360334e4cced2b63fa # 2.3.3
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 5.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           cache: 'pip' # caching pip dependencies

@@ -63,14 +90,36 @@
           PY_COLORS: 1
           ANSIBLE_FORCE_COLOR: 1

+      # these steps are necessary if not using ephemeral nodes
+      - name: Delete old Vagrant box versions
+        if: always() # do this even if a step before has failed
+        run: vagrant box prune --force
+
+      - name: Remove all local Vagrant boxes
+        if: always() # do this even if a step before has failed
+        run: if vagrant box list 2>/dev/null; then vagrant box list | cut -f 1 -d ' ' | xargs -L 1 vagrant box remove -f 2>/dev/null && echo "All Vagrant boxes removed." || echo "No Vagrant boxes found."; else echo "No Vagrant boxes found."; fi
+
+      - name: Remove all Virtualbox VMs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list vms | awk -F'"' '{print $2}' | xargs -I {} VBoxManage unregistervm --delete "{}"
+
+      - name: Remove all Virtualbox HDs
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hdds | awk -F':' '/^UUID:/ {print $2}' | xargs -I {} VBoxManage closemedium disk "{}" --delete
+
+      - name: Remove all Virtualbox Networks
+        if: always() # do this even if a step before has failed
+        run: VBoxManage list hostonlyifs | grep '^Name:' | awk '{print $2}' | grep '^vboxnet' | xargs -I {} VBoxManage hostonlyif remove {}
+
+      - name: Remove Virtualbox network config
+        if: always() # do this even if a step before has failed
+        run: sudo rm /etc/vbox/networks.conf || true
+
       - name: Upload log files
         if: always() # do this even if a step before has failed
-        uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # 3.1.1
+        uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # 4.3.0
         with:
           name: logs
           path: |
             ${{ runner.temp }}/logs
-
-      - name: Delete old box versions
-        if: always() # do this even if a step before has failed
-        run: vagrant box prune --force
+          overwrite: true
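The "Configure VirtualBox" step above is cut off by the hunk boundary. For context, a hedged sketch of what such a step typically writes is shown here; the range is an assumption based on the 192.168.30.x addresses used elsewhere in this compare, not taken from the diff. Since VirtualBox 6.1.28, host-only networks must fall inside ranges listed in /etc/vbox/networks.conf, which is also why the cleanup steps delete that file.

      # Hypothetical sketch only; the repository's actual networks.conf content is not shown in this hunk.
      - name: Configure VirtualBox
        run: |-
          sudo mkdir -p /etc/vbox
          cat <<EOF | sudo tee /etc/vbox/networks.conf > /dev/null
          * 192.168.30.0/24
          EOF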
.gitignore (vendored, 1 changed line)

@@ -1,3 +1,4 @@
 .env/
 *.log
 ansible.cfg
+kubeconfig
inventory/sample/group_vars/all.yml

@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.26.12+k3s1
+k3s_version: v1.29.0+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system

@@ -10,6 +10,12 @@ system_timezone: "Your/Timezone"
 # interface which will be used for flannel
 flannel_iface: "eth0"

+# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
+# calico_iface: "eth0"
+calico_ebpf: false # use eBPF dataplane instead of iptables
+calico_cidr: "10.52.0.0/16" # calico cluster pod cidr pool
+calico_tag: "v3.27.0" # calico version tag
+
 # apiserver_endpoint is virtual ip-address which will be configured on each master
 apiserver_endpoint: "192.168.30.222"

@@ -20,28 +26,42 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
 # The IP on which the node is reachable in the cluster.
 # Here, a sensible default is provided, you can still override
 # it for each of your hosts, though.
-k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
+k3s_node_ip: "{{ ansible_facts[(calico_iface | default(flannel_iface))]['ipv4']['address'] }}"

 # Disable the taint manually by setting: k3s_master_taint = false
 k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"

 # these arguments are recommended for servers as well as agents:
 extra_args: >-
-  --flannel-iface={{ flannel_iface }}
+  {{ '--flannel-iface=' + flannel_iface if calico_iface is not defined else '' }}
   --node-ip={{ k3s_node_ip }}

 # change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
+# the contents of the if block is also required if using calico
 extra_server_args: >-
   {{ extra_args }}
   {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
+  {% if calico_iface is defined %}
+  --flannel-backend=none
+  --disable-network-policy
+  --cluster-cidr={{ calico_cidr | default('10.52.0.0/16') }}
+  {% endif %}
   --tls-san {{ apiserver_endpoint }}
   --disable servicelb
   --disable traefik

 extra_agent_args: >-
   {{ extra_args }}

 # image tag for kube-vip
-kube_vip_tag_version: "v0.5.12"
+kube_vip_tag_version: "v0.6.4"

+# tag for kube-vip-cloud-provider manifest
+# kube_vip_cloud_provider_tag_version: "main"
+
+# kube-vip ip range for load balancer
+# (uncomment to use kube-vip for services instead of MetalLB)
+# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
+
 # metallb type frr or native
 metal_lb_type: "native"

@@ -55,8 +75,8 @@ metal_lb_mode: "layer2"
 # metal_lb_bgp_peer_address: "192.168.30.1"

 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.13.9"
-metal_lb_controller_tag_version: "v0.13.9"
+metal_lb_speaker_tag_version: "v0.13.12"
+metal_lb_controller_tag_version: "v0.13.12"

 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"
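To make the templating above concrete, here is an approximate sketch of what extra_server_args resolves to on the first control node when calico_iface is set to "eth0" and at least one host is in the node group. The node IP is a placeholder, the other values are the sample defaults above, and the *_rendered variable name exists only for illustration.

# Approximate rendering only; 192.168.30.38 is a made-up node IP.
extra_server_args_rendered: >-
  --node-ip=192.168.30.38
  --node-taint node-role.kubernetes.io/master=true:NoSchedule
  --flannel-backend=none
  --disable-network-policy
  --cluster-cidr=10.52.0.0/16
  --tls-san 192.168.30.222
  --disable servicelb
  --disable traefik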
@@ -13,6 +13,10 @@ We have these scenarios:
   To save a bit of test time, this cluster is _not_ highly available, it consists of only one control and one worker node.
 - **single_node**:
   Very similar to the default scenario, but uses only a single node for all cluster functionality.
+- **calico**:
+  The same as single node, but uses calico cni instead of flannel.
+- **kube-vip**
+  The same as single node, but uses kube-vip as service loadbalancer instead of MetalLB

 ## How to execute
molecule/calico/molecule.yml (new file, 49 lines)

---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.62
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
molecule/calico/overrides.yml (new file, 16 lines)

---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        calico_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: "192.168.30.224"
        metal_lb_ip_range: "192.168.30.100-192.168.30.109"
@@ -22,7 +22,7 @@ platforms:
       ssh.password: "vagrant"

   - name: control2
-    box: generic/debian11
+    box: generic/debian12
     memory: 1024
     cpus: 2
     groups:
molecule/kube-vip/molecule.yml (new file, 49 lines)

---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.62
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
molecule/kube-vip/overrides.yml (new file, 17 lines)

---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: "192.168.30.225"
        # Use kube-vip instead of MetalLB
        kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"
roles/k3s_server/tasks/kube-vip.yml (new file, 27 lines)

---
- name: Create manifests directory on first master
  file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Download vip cloud provider manifest to first master
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml" # noqa yaml[line-length]
    dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Copy kubevip configMap manifest to first master
  template:
    src: "kubevip.yaml.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
@@ -29,6 +29,12 @@
 - name: Deploy metallb manifest
   include_tasks: metallb.yml
   tags: metallb
+  when: kube_vip_lb_ip_range is not defined
+
+- name: Deploy kube-vip manifest
+  include_tasks: kube-vip.yml
+  tags: kubevip
+  when: kube_vip_lb_ip_range is defined

 - name: Init cluster inside the transient k3s-init service
   command:
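Together with the k3s_server_post change further down, these two conditional includes make MetalLB and kube-vip service load balancing mutually exclusive; the switch is driven solely by whether kube_vip_lb_ip_range is defined. A minimal sketch of the group_vars change that selects kube-vip, using the range already shown commented out in the sample all.yml above:

# Defining this single variable skips the MetalLB tasks and deploys the
# kube-vip cloud provider manifests instead.
kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"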
roles/k3s_server/templates/kubevip.yaml.j2 (new file, 13 lines)

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
  range-global: {{ kube_vip_lb_ip_range | join(',') }}
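For reference, a sketch of what this template renders to with the kube-vip molecule override shown earlier (a single legacy-style string, which the template wraps into a one-element list before joining):

# Rendered result for kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
  range-global: 192.168.30.110-192.168.30.119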
@@ -43,7 +43,7 @@ spec:
         - name: vip_ddns
           value: "false"
         - name: svc_enable
-          value: "false"
+          value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}"
         - name: vip_leaderelection
           value: "true"
         - name: vip_leaseduration
roles/k3s_server_post/tasks/calico.yml (new file, 114 lines)

---
- name: Deploy Calico to cluster
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
  run_once: true
  block:
    - name: Create manifests directory on first master
      file:
        path: /tmp/k3s
        state: directory
        owner: root
        group: root
        mode: 0755

    - name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
      ansible.builtin.get_url:
        url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
        dest: "/tmp/k3s/tigera-operator.yaml"
        owner: root
        group: root
        mode: 0755

    - name: Copy Calico custom resources manifest to first master
      ansible.builtin.template:
        src: "calico.crs.j2"
        dest: /tmp/k3s/custom-resources.yaml
        owner: root
        group: root
        mode: 0755

    - name: Deploy or replace Tigera Operator
      block:
        - name: Deploy Tigera Operator
          ansible.builtin.command:
            cmd: kubectl create -f /tmp/k3s/tigera-operator.yaml
          register: create_operator
          changed_when: "'created' in create_operator.stdout"
          failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
      rescue:
        - name: Replace existing Tigera Operator
          ansible.builtin.command:
            cmd: kubectl replace -f /tmp/k3s/tigera-operator.yaml
          register: replace_operator
          changed_when: "'replaced' in replace_operator.stdout"
          failed_when: "'Error' in replace_operator.stderr"

    - name: Wait for Tigera Operator resources
      command: >-
        k3s kubectl wait {{ item.type }}/{{ item.name }}
        --namespace='tigera-operator'
        --for=condition=Available=True
        --timeout=7s
      register: tigera_result
      changed_when: false
      until: tigera_result is succeeded
      retries: 7
      delay: 7
      with_items:
        - {name: tigera-operator, type: deployment}
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

    - name: Deploy Calico custom resources
      block:
        - name: Deploy custom resources for Calico
          ansible.builtin.command:
            cmd: kubectl create -f /tmp/k3s/custom-resources.yaml
          register: create_cr
          changed_when: "'created' in create_cr.stdout"
          failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
      rescue:
        - name: Apply new Calico custom resource manifest
          ansible.builtin.command:
            cmd: kubectl apply -f /tmp/k3s/custom-resources.yaml
          register: apply_cr
          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
          failed_when: "'Error' in apply_cr.stderr"

    - name: Wait for Calico system resources to be available
      command: >-
        {% if item.type == 'daemonset' %}
        k3s kubectl wait pods
        --namespace='{{ item.namespace }}'
        --selector={{ item.selector }}
        --for=condition=Ready
        {% else %}
        k3s kubectl wait {{ item.type }}/{{ item.name }}
        --namespace='{{ item.namespace }}'
        --for=condition=Available
        {% endif %}
        --timeout=7s
      register: cr_result
      changed_when: false
      until: cr_result is succeeded
      retries: 30
      delay: 7
      with_items:
        - {name: calico-typha, type: deployment, namespace: calico-system}
        - {name: calico-kube-controllers, type: deployment, namespace: calico-system}
        - {name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system}
        - {name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system}
        - {name: calico-apiserver, type: deployment, namespace: calico-apiserver}
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

    - name: Patch Felix configuration for eBPF mode
      ansible.builtin.command:
        cmd: >
          kubectl patch felixconfiguration default
          --type='merge'
          --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
      register: patch_result
      changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
      failed_when: "'Error' in patch_result.stderr"
      when: calico_ebpf
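Since the looped wait task assembles its command from Jinja, here is the loop body written out for the first deployment item as a standalone illustration (not an additional task in the role):

# Equivalent single invocation for {name: calico-typha, type: deployment, namespace: calico-system}:
- name: Wait for the calico-typha deployment (illustrative only)
  command: >-
    k3s kubectl wait deployment/calico-typha
    --namespace='calico-system'
    --for=condition=Available
    --timeout=7s
  register: cr_result
  changed_when: false
  until: cr_result is succeeded
  retries: 30
  delay: 7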
@@ -1,7 +1,13 @@
 ---
+- name: Deploy calico
+  include_tasks: calico.yml
+  tags: calico
+  when: calico_iface is defined
+
 - name: Deploy metallb pool
   include_tasks: metallb.yml
   tags: metallb
+  when: kube_vip_lb_ip_range is not defined

 - name: Remove tmp directory used for manifests
   file:
@@ -8,6 +8,27 @@
   with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true

+- name: Delete outdated metallb replicas
+  shell: |-
+    set -o pipefail
+
+    REPLICAS=$(k3s kubectl --namespace='metallb-system' get replicasets \
+      -l 'component=controller,app=metallb' \
+      -o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
+    REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
+    if [ -n "${REPLICAS_SETS}" ] ; then
+      for REPLICAS in "${REPLICAS_SETS}"
+      do
+        k3s kubectl --namespace='metallb-system' \
+          delete rs "${REPLICAS}"
+      done
+    fi
+  args:
+    executable: /bin/bash
+  changed_when: false
+  run_once: true
+  with_items: "{{ groups[group_name_master | default('master')] }}"
+
 - name: Copy metallb CRs manifest to first master
   template:
     src: "metallb.crs.j2"
roles/k3s_server_post/templates/calico.crs.j2 (new file, 41 lines)

# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: {{ calico_blockSize | default('26') }}
        cidr: {{ calico_cidr | default('10.52.0.0/16') }}
        encapsulation: {{ calico_encapsulation | default('VXLANCrossSubnet') }}
        natOutgoing: {{ calico_natOutgoing | default('Enabled') }}
        nodeSelector: {{ calico_nodeSelector | default('all()') }}
    nodeAddressAutodetectionV4:
      interface: {{ calico_iface }}
    linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}

---

# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
  KUBERNETES_SERVICE_PORT: '6443'
{% endif %}
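As a reference, a sketch of the Installation resource this template produces with the calico molecule override (calico_iface: eth1), calico_ebpf left at false, and every other value at its default:

# Illustrative rendering only; values follow the defaults shown above.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
      - blockSize: 26
        cidr: 10.52.0.0/16
        encapsulation: VXLANCrossSubnet
        natOutgoing: Enabled
        nodeSelector: all()
    nodeAddressAutodetectionV4:
      interface: eth1
    linuxDataplane: Iptables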
@@ -31,6 +31,14 @@
     - raspberry_pi|default(false)
     - ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))

+- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm)
+  set_fact:
+    detected_distribution: Raspbian
+  when:
+    - ansible_facts.architecture is search("aarch64")
+    - raspberry_pi|default(false)
+    - ansible_facts.lsb.description|default("") is match("Debian.*bookworm")
+
 - name: Set detected_distribution_major_version
   set_fact:
     detected_distribution_major_version: "{{ ansible_facts.lsb.major_release }}"
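For orientation, a hedged sketch of the gathered facts this new condition matches on a 64-bit Raspberry Pi OS (Bookworm) host; the exact description string varies by image and is an assumption here:

# Hypothetical excerpt of ansible_facts on such a host:
ansible_facts:
  architecture: aarch64
  lsb:
    id: Debian
    description: "Debian GNU/Linux 12 (bookworm)"
    major_release: "12"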
@@ -45,6 +45,7 @@
     - /var/lib/rancher/k3s
     - /var/lib/rancher/
     - /var/lib/cni/
     - /etc/cni/net.d

 - name: Remove K3s http_proxy files
   file:
site.yml (11 changed lines)

@@ -46,3 +46,14 @@
   roles:
     - role: k3s_server_post
       become: true
+
+- name: Storing kubeconfig in the playbook directory
+  hosts: master
+  environment: "{{ proxy_env | default({}) }}"
+  tasks:
+    - name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }}
+      ansible.builtin.fetch:
+        src: "{{ ansible_user_dir }}/.kube/config"
+        dest: ./kubeconfig
+        flat: true
+      when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
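The fetched file lands as ./kubeconfig next to site.yml. A hedged sketch of the cluster entry you would expect inside it, assuming the kubeconfig already points at the apiserver_endpoint VIP from the sample group_vars rather than localhost (both the entry name and that rewrite are assumptions, not shown in this diff):

# Hypothetical excerpt of ./kubeconfig after the fetch:
apiVersion: v1
kind: Config
clusters:
  - name: default
    cluster:
      server: https://192.168.30.222:6443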