Merge branch 'k3s-1-27' of github.com:techno-tim/k3s-ansible into k3s-1-27

Timothy Stewart
2023-08-17 22:41:02 -05:00
46 changed files with 128 additions and 159 deletions

@@ -7,7 +7,7 @@ jobs:
name: Pre-Commit
runs-on: ubuntu-latest
env:
PYTHON_VERSION: "3.10"
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase

@@ -14,7 +14,7 @@ jobs:
- single_node
fail-fast: false
env:
PYTHON_VERSION: "3.10"
PYTHON_VERSION: "3.11"
steps:
- name: Check out the codebase

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0 # v4.3.0
rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git
rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2 # v1.28.0
rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: a058554b9bcf88f12ad09ab9fb93b267a214368f # v6.8.6
rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a # v0.8.0.4
rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
hooks:
- id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9 # v1.3.1
rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: 30d9af95631de0d7cff4e282bde9160d38bb0359 # 0.4.0
rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks:
- id: fix-smartquotes
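
Note: the "# frozen: v…" comments introduced above follow the format written by `pre-commit autoupdate --freeze`, which pins each hook to an immutable commit SHA rather than a movable tag; the version in the trailing comment is informational only, so these pins are presumably regenerated with that command rather than edited by hand.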

@@ -8,7 +8,7 @@ This is based on the work from [this fork](https://github.com/212850a/k3s-ansibl
If you want more context on how this works, see:
📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)
📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
@@ -28,7 +28,7 @@ on processor architecture:
## ✅ System requirements
- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://technotim.live/posts/ansible-automation/).
- You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
@@ -101,7 +101,7 @@ scp debian@master_ip:~/.kube/config ~/.kube/config
### 🔨 Testing your cluster
See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
### Troubleshooting

@@ -55,7 +55,6 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb
metal_lb_frr_tag_version: "v7.5.1"
metal_lb_speaker_tag_version: "v0.13.9"
metal_lb_controller_tag_version: "v0.13.9"

@@ -4,7 +4,8 @@
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:

@@ -4,7 +4,8 @@
tasks:
- name: Override host variables (1/2)
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be

@@ -2,4 +2,4 @@
- name: Verify
hosts: all
roles:
- verify/from_outside
- verify_from_outside

@@ -6,4 +6,4 @@ outside_host: localhost
testing_namespace: molecule-verify-from-outside
# The directory in which the example manifests reside
example_manifests_path: ../../../../example
example_manifests_path: ../../../example

@@ -34,14 +34,14 @@
- name: Assert that the nginx welcome page is available
ansible.builtin.uri:
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
return_content: yes
register: result
failed_when: "'Welcome to nginx!' not in result.content"
vars:
ip: >-
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
port: >-
port_: >-
{{ nginx_services.resources[0].spec.ports[0].port }}
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
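
The `port` → `port_` rename here (like the `action` → `action_` rename later in this commit) appears to be driven by the newer ansible-lint pinned above: its var-naming[no-reserved] rule rejects variables that shadow Ansible playbook keywords, and both `port` and `action` are keywords. For reference, `ansible.utils.ipwrap` in the URL only brackets IPv6 literals and leaves IPv4 untouched, so the template is safe for either address family:

    # illustrative values, not from this repo:
    # '192.168.30.80' | ansible.utils.ipwrap  ->  '192.168.30.80'
    # 'fd00::80'      | ansible.utils.ipwrap  ->  '[fd00::80]'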

@@ -4,7 +4,8 @@
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:

reboot.sh Normal file → Executable file

@@ -1,5 +1,4 @@
ansible-core>=2.13.5
ansible-lint>=6.8.6
jmespath>=1.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
@@ -9,4 +8,3 @@ netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0
yamllint>=1.28.0

@@ -1,28 +1,21 @@
#
# This file is autogenerated by pip-compile with python 3.8
# To update, run:
# This file is autogenerated by pip-compile with Python 3.11
# by the following command:
#
# pip-compile requirements.in
#
ansible-compat==3.0.1
# via molecule
ansible-core==2.14.5
ansible-core==2.15.2
# via
# -r requirements.in
# ansible-compat
# ansible-lint
ansible-lint==6.15.0
# via -r requirements.in
arrow==1.2.3
# via jinja2-time
attrs==22.1.0
# via jsonschema
binaryornot==0.4.4
# via cookiecutter
black==22.10.0
# via ansible-lint
bracex==2.3.post1
# via wcmatch
cachetools==5.2.0
# via google-auth
certifi==2022.9.24
@@ -39,7 +32,6 @@ charset-normalizer==2.1.1
# via requests
click==8.1.3
# via
# black
# click-help-colors
# cookiecutter
# molecule
@@ -58,9 +50,7 @@ distro==1.8.0
enrich==1.2.7
# via molecule
filelock==3.8.0
# via
# ansible-lint
# virtualenv
# via virtualenv
google-auth==2.14.0
# via kubernetes
identify==2.5.8
@@ -78,14 +68,13 @@ jinja2-time==0.2.0
# via cookiecutter
jmespath==1.0.1
# via -r requirements.in
jsonpatch==1.32
jsonpatch==1.33
# via -r requirements.in
jsonpointer==2.3
# via jsonpatch
jsonschema==4.17.0
# via
# ansible-compat
# ansible-lint
# molecule
kubernetes==25.3.0
# via -r requirements.in
@@ -97,8 +86,6 @@ molecule==4.0.4
# molecule-vagrant
molecule-vagrant==1.0.0
# via -r requirements.in
mypy-extensions==0.4.3
# via black
netaddr==0.8.0
# via -r requirements.in
nodeenv==1.7.0
@@ -109,16 +96,9 @@ packaging==21.3
# via
# ansible-compat
# ansible-core
# ansible-lint
# molecule
pathspec==0.10.1
# via
# black
# yamllint
platformdirs==2.5.2
# via
# black
# virtualenv
# via virtualenv
pluggy==1.0.0
# via molecule
pre-commit==2.21.0
@@ -147,18 +127,16 @@ python-slugify==6.1.2
# via cookiecutter
python-vagrant==1.0.0
# via molecule-vagrant
pyyaml==6.0
pyyaml==6.0.1
# via
# -r requirements.in
# ansible-compat
# ansible-core
# ansible-lint
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
# yamllint
requests==2.28.1
# via
# cookiecutter
@@ -170,15 +148,12 @@ resolvelib==0.8.1
# via ansible-core
rich==12.6.0
# via
# ansible-lint
# enrich
# molecule
rsa==4.9
# via google-auth
ruamel-yaml==0.17.21
# via
# ansible-lint
# pre-commit-hooks
# via pre-commit-hooks
selinux==0.2.1
# via molecule-vagrant
six==1.16.0
@@ -187,9 +162,7 @@ six==1.16.0
# kubernetes
# python-dateutil
subprocess-tee==0.4.1
# via
# ansible-compat
# ansible-lint
# via ansible-compat
text-unidecode==1.3
# via python-slugify
urllib3==1.26.12
@@ -198,14 +171,8 @@ urllib3==1.26.12
# requests
virtualenv==20.16.6
# via pre-commit
wcmatch==8.4.1
# via ansible-lint
websocket-client==1.4.2
# via kubernetes
yamllint==1.31.0
# via
# -r requirements.in
# ansible-lint
# The following packages are considered to be unsafe in a requirements file:
# setuptools

@@ -1,6 +1,6 @@
---
- hosts: k3s_cluster
- name: Reset k3s cluster
hosts: k3s_cluster
gather_facts: yes
roles:
- role: reset
@@ -14,7 +14,8 @@
reboot:
reboot_timeout: 3600
- hosts: proxmox
- name: Revert changes to Proxmox cluster
hosts: proxmox
gather_facts: true
become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}"

@@ -1,16 +0,0 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null
server_init_args: >-
{% if groups['master'] | length > 1 %}
{% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}

@@ -0,0 +1,3 @@
---
# Name of the master group
group_name_master: master

@@ -7,7 +7,7 @@ After=network-online.target
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead

@@ -0,0 +1,20 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null
# Name of the master group
group_name_master: master
# yamllint disable rule:line-length
server_init_args: >-
{% if groups[group_name_master | default('master')] | length > 1 %}
{% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}
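
The new `group_name_master` default makes the name of the control-plane inventory group configurable wherever the roles previously hard-coded `groups['master']`. A minimal sketch, assuming a hypothetical inventory whose masters live in a group called `control_plane`:

    # group_vars/all.yml (hypothetical override)
    group_name_master: control_plane

With that set, `groups[group_name_master | default('master')]` resolves to the `control_plane` hosts, so the first master becomes `groups['control_plane'][0]`; the `default('master')` keeps existing inventories working without any change.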

@@ -1,17 +1,16 @@
---
- name: Clean previous runs of k3s-init
- name: Stop k3s-init
systemd:
name: k3s-init
state: stopped
failed_when: false
- name: Clean previous runs of k3s-init
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command.
command: systemctl reset-failed k3s-init
failed_when: false
changed_when: false
args:
warn: false # The ansible systemd module does not support reset-failed
- name: Deploy vip manifest
include_tasks: vip.yml
@@ -28,12 +27,13 @@
creates: "{{ systemd_dir }}/k3s.service"
- name: Verification
when: not ansible_check_mode
block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails)
command:
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
register: nodes
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
retries: "{{ retry_count | default(20) }}"
delay: 10
changed_when: false
@@ -49,7 +49,6 @@
name: k3s-init
state: stopped
failed_when: false
when: not ansible_check_mode
- name: Copy K3s service file
register: k3s_service

@@ -6,16 +6,16 @@
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml" # noqa yaml[line-length]
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
ansible.builtin.replace:
@@ -27,4 +27,4 @@
to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
loop_control:
label: "{{ item.change }} => {{ item.to }}"
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

@@ -6,7 +6,7 @@
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip rbac manifest to first master
ansible.builtin.get_url:
@@ -15,7 +15,7 @@
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy vip manifest to first master
template:
@@ -24,4 +24,4 @@
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

@@ -1,3 +1,6 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s
# Name of the master group
group_name_master: master

@@ -5,7 +5,7 @@
state: directory
owner: "{{ ansible_user_id }}"
mode: 0755
with_items: "{{ groups['master'] }}"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
@@ -14,14 +14,14 @@
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user_id }}"
mode: 0755
with_items: "{{ groups['master'] }}"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups['master'] }}"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Wait for MetalLB resources
@@ -66,7 +66,7 @@
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups['master'] }}"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Apply metallb CRs

@@ -1,4 +1,4 @@
---
- name: reboot server
- name: Reboot server
become: true
reboot:

@@ -1,30 +1,30 @@
---
- name: Set same timezone on every Server
timezone:
community.general.timezone:
name: "{{ system_timezone }}"
when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
- name: Set SELinux to disabled state
selinux:
ansible.posix.selinux:
state: disabled
when: ansible_os_family == "RedHat"
- name: Enable IPv4 forwarding
sysctl:
ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
- name: Enable IPv6 forwarding
sysctl:
ansible.posix.sysctl:
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
- name: Enable IPv6 router advertisements
sysctl:
ansible.posix.sysctl:
name: net.ipv6.conf.all.accept_ra
value: "2"
state: present
@@ -38,13 +38,13 @@
when: ansible_os_family == "RedHat"
- name: Load br_netfilter
modprobe:
community.general.modprobe:
name: br_netfilter
state: present
when: ansible_os_family == "RedHat"
- name: Set bridge-nf-call-iptables (just to be sure)
sysctl:
ansible.posix.sysctl:
name: "{{ item }}"
value: "1"
state: present

@@ -1,5 +1,11 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
- name: Reboot containers
block:
- name: Get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids: >-
{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
- name: Reboot container
command: "pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"
changed_when: true
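
Since the tasks no longer precompute `proxmox_lxc_filtered_ids` (the set_fact is dropped from the role's tasks in a later hunk), the handler now derives the container ids itself. A walk-through of the filter chain, assuming two hypothetical containers 101 and 102:

    # proxmox_lxc_filtered_files: ['/etc/pve/lxc/101.conf', '/etc/pve/lxc/102.conf']
    # map('split', '/')                -> [['', 'etc', 'pve', 'lxc', '101.conf'], ...]
    # map('last')                      -> ['101.conf', '102.conf']
    # map('split', '.') | map('first') -> ['101', '102']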

@@ -1,21 +1,15 @@
---
- name: check for container files that exist on this host
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
lineinfile:

@@ -1,3 +1,3 @@
---
- name: reboot
- name: Reboot
reboot:

@@ -47,20 +47,16 @@
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: execute OS related tasks on the Raspberry Pi - {{ action }}
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
with_first_found:
- "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action }}/{{ detected_distribution }}.yml"
- "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ action }}/{{ ansible_distribution }}.yml"
- "{{ action }}/default.yml"
- "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action_ }}/{{ detected_distribution }}.yml"
- "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ action_ }}/{{ ansible_distribution }}.yml"
- "{{ action_ }}/default.yml"
vars:
action: >-
{% if state == "present" -%}
setup
{%- else -%}
teardown
{%- endif %}
action_: >-
{% if state == "present" %}setup{% else %}teardown{% endif %}
when:
- raspberry_pi|default(false)

@@ -8,20 +8,22 @@
notify: reboot
- name: Install iptables
apt: name=iptables state=present
apt:
name: iptables
state: present
- name: Flush iptables before changing to iptables-legacy
iptables:
flush: true
- name: Changing to iptables-legacy
alternatives:
community.general.alternatives:
path: /usr/sbin/iptables-legacy
name: iptables
register: ip4_legacy
- name: Changing to ip6tables-legacy
alternatives:
community.general.alternatives:
path: /usr/sbin/ip6tables-legacy
name: ip6tables
register: ip6_legacy

@@ -9,7 +9,7 @@
check_mode: false
- name: Umount filesystem
mount:
ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items:

@@ -1,5 +0,0 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"

@@ -0,0 +1 @@
../../proxmox_lxc/handlers/main.yml

@@ -1,21 +1,15 @@
---
- name: check for container files that exist on this host
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
- name: Remove LXC apparmor profile
lineinfile:
dest: "{{ item }}"

@@ -1,13 +1,14 @@
---
- hosts: proxmox
- name: Prepare Proxmox cluster
hosts: proxmox
gather_facts: true
become: yes
roles:
- role: proxmox_lxc
when: proxmox_lxc_configure
- hosts: k3s_cluster
- name: Prepare k3s nodes
hosts: k3s_cluster
gather_facts: yes
roles:
- role: lxc
@@ -20,17 +21,20 @@
- role: raspberrypi
become: true
- hosts: master
- name: Setup k3s servers
hosts: master
roles:
- role: k3s/master
- role: k3s_server
become: true
- hosts: node
- name: Setup k3s agents
hosts: node
roles:
- role: k3s/node
- role: k3s_agent
become: true
- hosts: master
- name: Configure k3s cluster
hosts: master
roles:
- role: k3s/post
- role: k3s_server_post
become: true