diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index ac69399..b43f5bb 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -7,7 +7,7 @@ jobs:
     name: Pre-Commit
     runs-on: ubuntu-latest
     env:
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"
 
     steps:
       - name: Check out the codebase
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 5ca606f..10b6135 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -14,7 +14,7 @@ jobs:
           - single_node
       fail-fast: false
     env:
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"
 
     steps:
       - name: Check out the codebase
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e6fbf59..b5b4ddb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0  # v4.3.0
+    rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9  # frozen: v4.4.0
     hooks:
       - id: requirements-txt-fixer
       - id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
       - id: trailing-whitespace
         args: [--markdown-linebreak-ext=md]
   - repo: https://github.com/adrienverge/yamllint.git
-    rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2  # v1.28.0
+    rev: b05e028c5881819161d11cb543fd96a30c06cceb  # frozen: v1.32.0
     hooks:
       - id: yamllint
        args: [-c=.yamllint]
   - repo: https://github.com/ansible-community/ansible-lint.git
-    rev: a058554b9bcf88f12ad09ab9fb93b267a214368f  # v6.8.6
+    rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a  # frozen: v6.17.2
     hooks:
       - id: ansible-lint
   - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a  # v0.8.0.4
+    rev: 375289a39f5708101b1f916eb729e8d6da96993f  # frozen: v0.9.0.5
     hooks:
       - id: shellcheck
   - repo: https://github.com/Lucas-C/pre-commit-hooks
-    rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9  # v1.3.1
+    rev: 12885e376b93dc4536ad68d156065601e4433665  # frozen: v1.5.1
     hooks:
       - id: remove-crlf
       - id: remove-tabs
   - repo: https://github.com/sirosen/texthooks
-    rev: 30d9af95631de0d7cff4e282bde9160d38bb0359  # 0.4.0
+    rev: c4ffd3e31669dd4fa4d31a23436cc13839730084  # frozen: 0.5.0
     hooks:
       - id: fix-smartquotes
diff --git a/README.md b/README.md
index ceff956..ff3f1e7 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ This is based on the work from [this fork](https://github.com/212850a/k3s-ansibl
 
 If you want more context on how this works, see:
 
-📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
+📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)
 
 📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
 
@@ -28,7 +28,7 @@ on processor architecture:
 
 ## ✅ System requirements
 
-- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://technotim.live/posts/ansible-automation/).
 
 - You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
 
@@ -101,7 +101,7 @@ scp debian@master_ip:~/.kube/config ~/.kube/config
 
 ### 🔨 Testing your cluster
 
-See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
+See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
 
 ### Troubleshooting
 
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
index 54c4423..b5016d8 100644
--- a/inventory/sample/group_vars/all.yml
+++ b/inventory/sample/group_vars/all.yml
@@ -55,7 +55,6 @@ metal_lb_mode: "layer2"
 # metal_lb_bgp_peer_address: "192.168.30.1"
 
 # image tag for metal lb
-metal_lb_frr_tag_version: "v7.5.1"
 metal_lb_speaker_tag_version: "v0.13.9"
 metal_lb_controller_tag_version: "v0.13.9"
 
diff --git a/molecule/default/overrides.yml b/molecule/default/overrides.yml
index 3c47c63..4eea472 100644
--- a/molecule/default/overrides.yml
+++ b/molecule/default/overrides.yml
@@ -4,7 +4,8 @@
   tasks:
     - name: Override host variables
       ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
         flannel_iface: eth1
 
         # The test VMs might be a bit slow, so we give them more time to join the cluster:
diff --git a/molecule/ipv6/overrides.yml b/molecule/ipv6/overrides.yml
index d701d24..44bbc07 100644
--- a/molecule/ipv6/overrides.yml
+++ b/molecule/ipv6/overrides.yml
@@ -4,7 +4,8 @@
   tasks:
     - name: Override host variables (1/2)
       ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
         flannel_iface: eth1
 
         # In this scenario, we have multiple interfaces that the VIP could be
diff --git a/molecule/resources/verify.yml b/molecule/resources/verify.yml
index ce0cccb..ef7ea52 100644
--- a/molecule/resources/verify.yml
+++ b/molecule/resources/verify.yml
@@ -2,4 +2,4 @@
 - name: Verify
   hosts: all
   roles:
-    - verify/from_outside
+    - verify_from_outside
diff --git a/molecule/resources/verify/from_outside/defaults/main.yml b/molecule/resources/verify_from_outside/defaults/main.yml
similarity index 85%
rename from molecule/resources/verify/from_outside/defaults/main.yml
rename to molecule/resources/verify_from_outside/defaults/main.yml
index f8db768..104fda4 100644
--- a/molecule/resources/verify/from_outside/defaults/main.yml
+++ b/molecule/resources/verify_from_outside/defaults/main.yml
@@ -6,4 +6,4 @@ outside_host: localhost
 testing_namespace: molecule-verify-from-outside
 
 # The directory in which the example manifests reside
-example_manifests_path: ../../../../example
+example_manifests_path: ../../../example
diff --git a/molecule/resources/verify/from_outside/tasks/kubecfg-cleanup.yml b/molecule/resources/verify_from_outside/tasks/kubecfg-cleanup.yml
similarity index 100%
rename from molecule/resources/verify/from_outside/tasks/kubecfg-cleanup.yml
rename to molecule/resources/verify_from_outside/tasks/kubecfg-cleanup.yml
diff --git a/molecule/resources/verify/from_outside/tasks/kubecfg-fetch.yml b/molecule/resources/verify_from_outside/tasks/kubecfg-fetch.yml
similarity index 100%
rename from molecule/resources/verify/from_outside/tasks/kubecfg-fetch.yml
rename to molecule/resources/verify_from_outside/tasks/kubecfg-fetch.yml
diff --git a/molecule/resources/verify/from_outside/tasks/main.yml b/molecule/resources/verify_from_outside/tasks/main.yml
similarity index 100%
rename from molecule/resources/verify/from_outside/tasks/main.yml
rename to molecule/resources/verify_from_outside/tasks/main.yml
diff --git a/molecule/resources/verify/from_outside/tasks/test/deploy-example.yml b/molecule/resources/verify_from_outside/tasks/test/deploy-example.yml
similarity index 95%
rename from molecule/resources/verify/from_outside/tasks/test/deploy-example.yml
rename to molecule/resources/verify_from_outside/tasks/test/deploy-example.yml
index 9248be7..61c4cec 100644
--- a/molecule/resources/verify/from_outside/tasks/test/deploy-example.yml
+++ b/molecule/resources/verify_from_outside/tasks/test/deploy-example.yml
@@ -34,14 +34,14 @@
 
 - name: Assert that the nginx welcome page is available
   ansible.builtin.uri:
-    url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
+    url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
     return_content: yes
   register: result
   failed_when: "'Welcome to nginx!' not in result.content"
   vars:
     ip: >-
       {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
-    port: >-
+    port_: >-
       {{ nginx_services.resources[0].spec.ports[0].port }}
 # Deactivated linter rules:
 # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
diff --git a/molecule/resources/verify/from_outside/tasks/test/get-nodes.yml b/molecule/resources/verify_from_outside/tasks/test/get-nodes.yml
similarity index 100%
rename from molecule/resources/verify/from_outside/tasks/test/get-nodes.yml
rename to molecule/resources/verify_from_outside/tasks/test/get-nodes.yml
diff --git a/molecule/single_node/overrides.yml b/molecule/single_node/overrides.yml
index 777ef4b..799275e 100644
--- a/molecule/single_node/overrides.yml
+++ b/molecule/single_node/overrides.yml
@@ -4,7 +4,8 @@
   tasks:
     - name: Override host variables
       ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
         flannel_iface: eth1
 
         # The test VMs might be a bit slow, so we give them more time to join the cluster:
diff --git a/reboot.sh b/reboot.sh
old mode 100644
new mode 100755
diff --git a/requirements.in b/requirements.in
index 151033d..715153b 100644
--- a/requirements.in
+++ b/requirements.in
@@ -1,5 +1,4 @@
 ansible-core>=2.13.5
-ansible-lint>=6.8.6
 jmespath>=1.0.1
 jsonpatch>=1.32
 kubernetes>=25.3.0
@@ -9,4 +8,3 @@ netaddr>=0.8.0
 pre-commit>=2.20.0
 pre-commit-hooks>=1.3.1
 pyyaml>=6.0
-yamllint>=1.28.0
diff --git a/requirements.txt b/requirements.txt
index 999a343..0604a64 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,28 +1,21 @@
 #
-# This file is autogenerated by pip-compile with python 3.8
-# To update, run:
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
 #
 #    pip-compile requirements.in
 #
 ansible-compat==3.0.1
     # via molecule
-ansible-core==2.14.5
+ansible-core==2.15.2
     # via
     #   -r requirements.in
     #   ansible-compat
-    #   ansible-lint
-ansible-lint==6.15.0
-    # via -r requirements.in
 arrow==1.2.3
     # via jinja2-time
 attrs==22.1.0
     # via jsonschema
 binaryornot==0.4.4
     # via cookiecutter
-black==22.10.0
-    # via ansible-lint
-bracex==2.3.post1
-    # via wcmatch
 cachetools==5.2.0
     # via google-auth
 certifi==2022.9.24
@@ -39,7 +32,6 @@ charset-normalizer==2.1.1
     # via requests
 click==8.1.3
     # via
-    #   black
     #   click-help-colors
     #   cookiecutter
     #   molecule
@@ -58,9 +50,7 @@ distro==1.8.0
 enrich==1.2.7
     # via molecule
 filelock==3.8.0
-    # via
-    #   ansible-lint
-    #   virtualenv
+    # via virtualenv
 google-auth==2.14.0
     # via kubernetes
 identify==2.5.8
@@ -78,14 +68,13 @@ jinja2-time==0.2.0
     # via cookiecutter
 jmespath==1.0.1
     # via -r requirements.in
-jsonpatch==1.32
+jsonpatch==1.33
     # via -r requirements.in
 jsonpointer==2.3
     # via jsonpatch
 jsonschema==4.17.0
     # via
     #   ansible-compat
-    #   ansible-lint
     #   molecule
 kubernetes==25.3.0
     # via -r requirements.in
@@ -97,8 +86,6 @@ molecule==4.0.4
     #   molecule-vagrant
 molecule-vagrant==1.0.0
     # via -r requirements.in
-mypy-extensions==0.4.3
-    # via black
 netaddr==0.8.0
     # via -r requirements.in
 nodeenv==1.7.0
@@ -109,16 +96,9 @@ packaging==21.3
     # via
     #   ansible-compat
     #   ansible-core
-    #   ansible-lint
     #   molecule
-pathspec==0.10.1
-    # via
-    #   black
-    #   yamllint
 platformdirs==2.5.2
-    # via
-    #   black
-    #   virtualenv
+    # via virtualenv
 pluggy==1.0.0
     # via molecule
 pre-commit==2.21.0
@@ -147,18 +127,16 @@ python-slugify==6.1.2
     # via cookiecutter
 python-vagrant==1.0.0
     # via molecule-vagrant
-pyyaml==6.0
+pyyaml==6.0.1
     # via
     #   -r requirements.in
     #   ansible-compat
     #   ansible-core
-    #   ansible-lint
     #   cookiecutter
     #   kubernetes
     #   molecule
     #   molecule-vagrant
     #   pre-commit
-    #   yamllint
 requests==2.28.1
     # via
     #   cookiecutter
@@ -170,15 +148,12 @@ resolvelib==0.8.1
     # via ansible-core
 rich==12.6.0
     # via
-    #   ansible-lint
     #   enrich
     #   molecule
 rsa==4.9
     # via google-auth
 ruamel-yaml==0.17.21
-    # via
-    #   ansible-lint
-    #   pre-commit-hooks
+    # via pre-commit-hooks
 selinux==0.2.1
     # via molecule-vagrant
 six==1.16.0
@@ -187,9 +162,7 @@ six==1.16.0
     #   kubernetes
     #   python-dateutil
 subprocess-tee==0.4.1
-    # via
-    #   ansible-compat
-    #   ansible-lint
+    # via ansible-compat
 text-unidecode==1.3
     # via python-slugify
 urllib3==1.26.12
@@ -198,14 +171,8 @@ urllib3==1.26.12
     #   requests
 virtualenv==20.16.6
     # via pre-commit
-wcmatch==8.4.1
-    # via ansible-lint
 websocket-client==1.4.2
     # via kubernetes
-yamllint==1.31.0
-    # via
-    #   -r requirements.in
-    #   ansible-lint
 
 # The following packages are considered to be unsafe in a requirements file:
 # setuptools
diff --git a/reset.yml b/reset.yml
index 2cf6efb..02d4d89 100644
--- a/reset.yml
+++ b/reset.yml
@@ -1,6 +1,6 @@
 ---
-
-- hosts: k3s_cluster
+- name: Reset k3s cluster
+  hosts: k3s_cluster
   gather_facts: yes
   roles:
     - role: reset
@@ -14,7 +14,8 @@
       reboot:
         reboot_timeout: 3600
 
-- hosts: proxmox
+- name: Revert changes to Proxmox cluster
+  hosts: proxmox
   gather_facts: true
   become: yes
   remote_user: "{{ proxmox_lxc_ssh_user }}"
diff --git a/roles/k3s/master/defaults/main.yml b/roles/k3s/master/defaults/main.yml
deleted file mode 100644
index 9e2fe63..0000000
--- a/roles/k3s/master/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# If you want to explicitly define an interface that ALL control nodes
-# should use to propagate the VIP, define it here. Otherwise, kube-vip
-# will determine the right interface automatically at runtime.
-kube_vip_iface: null
-
-server_init_args: >-
-  {% if groups['master'] | length > 1 %}
-  {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
-  --cluster-init
-  {% else %}
-  --server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
-  {% endif %}
-  --token {{ k3s_token }}
-  {% endif %}
-  {{ extra_server_args | default('') }}
diff --git a/roles/k3s/node/defaults/main.yml b/roles/k3s/node/defaults/main.yml
new file mode 100644
index 0000000..a07af66
--- /dev/null
+++ b/roles/k3s/node/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# Name of the master group
+group_name_master: master
diff --git a/roles/k3s/node/tasks/main.yml b/roles/k3s_agent/tasks/main.yml
similarity index 100%
rename from roles/k3s/node/tasks/main.yml
rename to roles/k3s_agent/tasks/main.yml
diff --git a/roles/k3s/node/templates/k3s.service.j2 b/roles/k3s_agent/templates/k3s.service.j2
similarity index 85%
rename from roles/k3s/node/templates/k3s.service.j2
rename to roles/k3s_agent/templates/k3s.service.j2
index 67abadb..3be92e3 100644
--- a/roles/k3s/node/templates/k3s.service.j2
+++ b/roles/k3s_agent/templates/k3s.service.j2
@@ -7,7 +7,7 @@ After=network-online.target
 Type=notify
 ExecStartPre=-/sbin/modprobe br_netfilter
 ExecStartPre=-/sbin/modprobe overlay
-ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
+ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
 KillMode=process
 Delegate=yes
 # Having non-zero Limit*s causes performance problems due to accounting overhead
diff --git a/roles/k3s_server/defaults/main.yml b/roles/k3s_server/defaults/main.yml
new file mode 100644
index 0000000..46f1528
--- /dev/null
+++ b/roles/k3s_server/defaults/main.yml
@@ -0,0 +1,20 @@
+---
+# If you want to explicitly define an interface that ALL control nodes
+# should use to propagate the VIP, define it here. Otherwise, kube-vip
+# will determine the right interface automatically at runtime.
+kube_vip_iface: null
+
+# Name of the master group
+group_name_master: master
+
+# yamllint disable rule:line-length
+server_init_args: >-
+  {% if groups[group_name_master | default('master')] | length > 1 %}
+  {% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
+  --cluster-init
+  {% else %}
+  --server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
+  {% endif %}
+  --token {{ k3s_token }}
+  {% endif %}
+  {{ extra_server_args | default('') }}
diff --git a/roles/k3s/master/tasks/fetch_k3s_init_logs.yml b/roles/k3s_server/tasks/fetch_k3s_init_logs.yml
similarity index 100%
rename from roles/k3s/master/tasks/fetch_k3s_init_logs.yml
rename to roles/k3s_server/tasks/fetch_k3s_init_logs.yml
diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s_server/tasks/main.yml
similarity index 93%
rename from roles/k3s/master/tasks/main.yml
rename to roles/k3s_server/tasks/main.yml
index 76b910c..0a8c4b5 100644
--- a/roles/k3s/master/tasks/main.yml
+++ b/roles/k3s_server/tasks/main.yml
@@ -1,17 +1,16 @@
 ---
-- name: Clean previous runs of k3s-init
+- name: Stop k3s-init
   systemd:
     name: k3s-init
     state: stopped
   failed_when: false
 
-- name: Clean previous runs of k3s-init
+- name: Clean previous runs of k3s-init  # noqa command-instead-of-module
+  # The systemd module does not support "reset-failed", so we need to resort to command.
   command: systemctl reset-failed k3s-init
   failed_when: false
   changed_when: false
-  args:
-    warn: false  # The ansible systemd module does not support reset-failed
 
 - name: Deploy vip manifest
   include_tasks: vip.yml
 
@@ -28,12 +27,13 @@
     creates: "{{ systemd_dir }}/k3s.service"
 
 - name: Verification
+  when: not ansible_check_mode
   block:
     - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
       command:
         cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
       register: nodes
-      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
+      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length)  # yamllint disable-line rule:line-length
       retries: "{{ retry_count | default(20) }}"
       delay: 10
       changed_when: false
@@ -49,7 +49,6 @@
         name: k3s-init
         state: stopped
       failed_when: false
-      when: not ansible_check_mode
 
 - name: Copy K3s service file
   register: k3s_service
diff --git a/roles/k3s/master/tasks/metallb.yml b/roles/k3s_server/tasks/metallb.yml
similarity index 68%
rename from roles/k3s/master/tasks/metallb.yml
rename to roles/k3s_server/tasks/metallb.yml
index 917b4a8..10ff6b5 100644
--- a/roles/k3s/master/tasks/metallb.yml
+++ b/roles/k3s_server/tasks/metallb.yml
@@ -6,16 +6,16 @@
     owner: root
     group: root
     mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 
 - name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
   ansible.builtin.get_url:
-    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml"  # noqa yaml[line-length]
+    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml"  # noqa yaml[line-length]
     dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
     owner: root
     group: root
     mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 
 - name: Set image versions in manifest for metallb-{{ metal_lb_type }}
   ansible.builtin.replace:
@@ -27,4 +27,4 @@
       to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
   loop_control:
     label: "{{ item.change }} => {{ item.to }}"
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
diff --git a/roles/k3s/master/tasks/vip.yml b/roles/k3s_server/tasks/vip.yml
similarity index 67%
rename from roles/k3s/master/tasks/vip.yml
rename to roles/k3s_server/tasks/vip.yml
index dcdc039..88d4383 100644
--- a/roles/k3s/master/tasks/vip.yml
+++ b/roles/k3s_server/tasks/vip.yml
@@ -6,7 +6,7 @@
     owner: root
     group: root
     mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 
 - name: Download vip rbac manifest to first master
   ansible.builtin.get_url:
@@ -15,7 +15,7 @@
     owner: root
     group: root
     mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 
 - name: Copy vip manifest to first master
   template:
@@ -24,4 +24,4 @@
     owner: root
     group: root
     mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
diff --git a/roles/k3s/master/templates/content.j2 b/roles/k3s_server/templates/content.j2
similarity index 100%
rename from roles/k3s/master/templates/content.j2
rename to roles/k3s_server/templates/content.j2
diff --git a/roles/k3s/master/templates/k3s.service.j2 b/roles/k3s_server/templates/k3s.service.j2
similarity index 100%
rename from roles/k3s/master/templates/k3s.service.j2
rename to roles/k3s_server/templates/k3s.service.j2
diff --git a/roles/k3s/master/templates/vip.yaml.j2 b/roles/k3s_server/templates/vip.yaml.j2
similarity index 100%
rename from roles/k3s/master/templates/vip.yaml.j2
rename to roles/k3s_server/templates/vip.yaml.j2
diff --git a/roles/k3s/post/defaults/main.yml b/roles/k3s_server_post/defaults/main.yml
similarity index 61%
rename from roles/k3s/post/defaults/main.yml
rename to roles/k3s_server_post/defaults/main.yml
index a20f9cf..1c458fa 100644
--- a/roles/k3s/post/defaults/main.yml
+++ b/roles/k3s_server_post/defaults/main.yml
@@ -1,3 +1,6 @@
 ---
 # Timeout to wait for MetalLB services to come up
 metal_lb_available_timeout: 120s
+
+# Name of the master group
+group_name_master: master
diff --git a/roles/k3s/post/tasks/main.yml b/roles/k3s_server_post/tasks/main.yml
similarity index 100%
rename from roles/k3s/post/tasks/main.yml
rename to roles/k3s_server_post/tasks/main.yml
diff --git a/roles/k3s/post/tasks/metallb.yml b/roles/k3s_server_post/tasks/metallb.yml
similarity index 91%
rename from roles/k3s/post/tasks/metallb.yml
rename to roles/k3s_server_post/tasks/metallb.yml
index 9a6454e..2421947 100644
--- a/roles/k3s/post/tasks/metallb.yml
+++ b/roles/k3s_server_post/tasks/metallb.yml
@@ -5,7 +5,7 @@
     state: directory
     owner: "{{ ansible_user_id }}"
     mode: 0755
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Copy metallb CRs manifest to first master
@@ -14,14 +14,14 @@
     dest: "/tmp/k3s/metallb-crs.yaml"
     owner: "{{ ansible_user_id }}"
     mode: 0755
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Test metallb-system namespace
   command: >-
     k3s kubectl -n metallb-system
   changed_when: false
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Wait for MetalLB resources
@@ -66,7 +66,7 @@
   command: >-
     k3s kubectl -n metallb-system get endpoints webhook-service
   changed_when: false
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Apply metallb CRs
diff --git a/roles/k3s/post/templates/metallb.crs.j2 b/roles/k3s_server_post/templates/metallb.crs.j2
similarity index 100%
rename from roles/k3s/post/templates/metallb.crs.j2
rename to roles/k3s_server_post/templates/metallb.crs.j2
diff --git a/roles/lxc/handlers/main.yml b/roles/lxc/handlers/main.yml
index 20013cc..7d73985 100644
--- a/roles/lxc/handlers/main.yml
+++ b/roles/lxc/handlers/main.yml
@@ -1,4 +1,4 @@
 ---
-- name: reboot server
+- name: Reboot server
   become: true
   reboot:
diff --git a/roles/prereq/tasks/main.yml b/roles/prereq/tasks/main.yml
index dcab613..b85ae0d 100644
--- a/roles/prereq/tasks/main.yml
+++ b/roles/prereq/tasks/main.yml
@@ -1,30 +1,30 @@
 ---
 - name: Set same timezone on every Server
-  timezone:
+  community.general.timezone:
     name: "{{ system_timezone }}"
   when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
 
 - name: Set SELinux to disabled state
-  selinux:
+  ansible.posix.selinux:
     state: disabled
   when: ansible_os_family == "RedHat"
 
 - name: Enable IPv4 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_forward
     value: "1"
     state: present
     reload: yes
 
 - name: Enable IPv6 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv6.conf.all.forwarding
     value: "1"
     state: present
     reload: yes
 
 - name: Enable IPv6 router advertisements
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv6.conf.all.accept_ra
     value: "2"
     state: present
@@ -38,13 +38,13 @@
   when: ansible_os_family == "RedHat"
 
 - name: Load br_netfilter
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   when: ansible_os_family == "RedHat"
 
 - name: Set bridge-nf-call-iptables (just to be sure)
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     value: "1"
     state: present
diff --git a/roles/proxmox_lxc/handlers/main.yml b/roles/proxmox_lxc/handlers/main.yml
index 9b99cb2..565c882 100644
--- a/roles/proxmox_lxc/handlers/main.yml
+++ b/roles/proxmox_lxc/handlers/main.yml
@@ -1,5 +1,11 @@
 ---
-- name: reboot containers
-  command:
-    "pct reboot {{ item }}"
-  loop: "{{ proxmox_lxc_filtered_ids }}"
+- name: Reboot containers
+  block:
+    - name: Get container ids from filtered files
+      set_fact:
+        proxmox_lxc_filtered_ids: >-
+          {{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
+    - name: Reboot container
+      command: "pct reboot {{ item }}"
+      loop: "{{ proxmox_lxc_filtered_ids }}"
+      changed_when: true
diff --git a/roles/proxmox_lxc/tasks/main.yml b/roles/proxmox_lxc/tasks/main.yml
index 76d43a0..8ca1b3d 100644
--- a/roles/proxmox_lxc/tasks/main.yml
+++ b/roles/proxmox_lxc/tasks/main.yml
@@ -1,21 +1,15 @@
 ---
-- name: check for container files that exist on this host
+- name: Check for container files that exist on this host
   stat:
     path: "/etc/pve/lxc/{{ item }}.conf"
   loop: "{{ proxmox_lxc_ct_ids }}"
   register: stat_results
 
-- name: filter out files that do not exist
+- name: Filter out files that do not exist
   set_fact:
     proxmox_lxc_filtered_files:
       '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
 
-# used for the reboot handler
-- name: get container ids from filtered files
-  set_fact:
-    proxmox_lxc_filtered_ids:
-      '{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
-
 # https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
 - name: Ensure lxc config has the right apparmor profile
   lineinfile:
diff --git a/roles/raspberrypi/handlers/main.yml b/roles/raspberrypi/handlers/main.yml
index d25cf90..ac385a7 100644
--- a/roles/raspberrypi/handlers/main.yml
+++ b/roles/raspberrypi/handlers/main.yml
@@ -1,3 +1,3 @@
 ---
-- name: reboot
+- name: Reboot
   reboot:
diff --git a/roles/raspberrypi/tasks/main.yml b/roles/raspberrypi/tasks/main.yml
index 50c4af4..29f824a 100644
--- a/roles/raspberrypi/tasks/main.yml
+++ b/roles/raspberrypi/tasks/main.yml
@@ -47,20 +47,16 @@
     - raspberry_pi|default(false)
     - ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
 
-- name: execute OS related tasks on the Raspberry Pi - {{ action }}
+- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
   include_tasks: "{{ item }}"
   with_first_found:
-    - "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
-    - "{{ action }}/{{ detected_distribution }}.yml"
-    - "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
-    - "{{ action }}/{{ ansible_distribution }}.yml"
-    - "{{ action }}/default.yml"
+    - "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
+    - "{{ action_ }}/{{ detected_distribution }}.yml"
+    - "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
+    - "{{ action_ }}/{{ ansible_distribution }}.yml"
+    - "{{ action_ }}/default.yml"
   vars:
-    action: >-
-      {% if state == "present" -%}
-        setup
-      {%- else -%}
-        teardown
-      {%- endif %}
+    action_: >-
+      {% if state == "present" %}setup{% else %}teardown{% endif %}
   when:
     - raspberry_pi|default(false)
diff --git a/roles/raspberrypi/tasks/setup/Raspbian.yml b/roles/raspberrypi/tasks/setup/Raspbian.yml
index 371a255..03fd943 100644
--- a/roles/raspberrypi/tasks/setup/Raspbian.yml
+++ b/roles/raspberrypi/tasks/setup/Raspbian.yml
@@ -8,20 +8,22 @@
   notify: reboot
 
 - name: Install iptables
-  apt: name=iptables state=present
+  apt:
+    name: iptables
+    state: present
 
 - name: Flush iptables before changing to iptables-legacy
   iptables:
     flush: true
 
 - name: Changing to iptables-legacy
-  alternatives:
+  community.general.alternatives:
     path: /usr/sbin/iptables-legacy
     name: iptables
   register: ip4_legacy
 
 - name: Changing to ip6tables-legacy
-  alternatives:
+  community.general.alternatives:
     path: /usr/sbin/ip6tables-legacy
     name: ip6tables
   register: ip6_legacy
diff --git a/roles/reset/tasks/umount_with_children.yml b/roles/reset/tasks/umount_with_children.yml
index 5883b70..e0f9d5c 100644
--- a/roles/reset/tasks/umount_with_children.yml
+++ b/roles/reset/tasks/umount_with_children.yml
@@ -9,7 +9,7 @@
   check_mode: false
 
 - name: Umount filesystem
-  mount:
+  ansible.posix.mount:
     path: "{{ item }}"
     state: unmounted
   with_items:
diff --git a/roles/reset_proxmox_lxc/handlers/main.yml b/roles/reset_proxmox_lxc/handlers/main.yml
deleted file mode 100644
index 9b99cb2..0000000
--- a/roles/reset_proxmox_lxc/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: reboot containers
-  command:
-    "pct reboot {{ item }}"
-  loop: "{{ proxmox_lxc_filtered_ids }}"
diff --git a/roles/reset_proxmox_lxc/handlers/main.yml b/roles/reset_proxmox_lxc/handlers/main.yml
new file mode 120000
index 0000000..7f79c4b
--- /dev/null
+++ b/roles/reset_proxmox_lxc/handlers/main.yml
@@ -0,0 +1 @@
+../../proxmox_lxc/handlers/main.yml
\ No newline at end of file
diff --git a/roles/reset_proxmox_lxc/tasks/main.yml b/roles/reset_proxmox_lxc/tasks/main.yml
index d9f402d..74036b3 100644
--- a/roles/reset_proxmox_lxc/tasks/main.yml
+++ b/roles/reset_proxmox_lxc/tasks/main.yml
@@ -1,21 +1,15 @@
 ---
-- name: check for container files that exist on this host
+- name: Check for container files that exist on this host
   stat:
     path: "/etc/pve/lxc/{{ item }}.conf"
   loop: "{{ proxmox_lxc_ct_ids }}"
   register: stat_results
 
-- name: filter out files that do not exist
+- name: Filter out files that do not exist
  set_fact:
     proxmox_lxc_filtered_files:
       '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
 
-# used for the reboot handler
-- name: get container ids from filtered files
-  set_fact:
-    proxmox_lxc_filtered_ids:
-      '{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
-
 - name: Remove LXC apparmor profile
   lineinfile:
     dest: "{{ item }}"
diff --git a/site.yml b/site.yml
index 5104cce..8f24982 100644
--- a/site.yml
+++ b/site.yml
@@ -1,13 +1,14 @@
 ---
-
-- hosts: proxmox
+- name: Prepare Proxmox cluster
+  hosts: proxmox
   gather_facts: true
   become: yes
   roles:
     - role: proxmox_lxc
       when: proxmox_lxc_configure
 
-- hosts: k3s_cluster
+- name: Prepare k3s nodes
+  hosts: k3s_cluster
   gather_facts: yes
   roles:
     - role: lxc
@@ -20,17 +21,20 @@
     - role: raspberrypi
       become: true
 
-- hosts: master
+- name: Setup k3s servers
+  hosts: master
   roles:
-    - role: k3s/master
+    - role: k3s_server
      become: true
 
-- hosts: node
+- name: Setup k3s agents
+  hosts: node
   roles:
-    - role: k3s/node
+    - role: k3s_agent
       become: true
 
-- hosts: master
+- name: Configure k3s cluster
+  hosts: master
   roles:
-    - role: k3s/post
+    - role: k3s_server_post
       become: true
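
Note on the recurring lookup introduced above: the renamed roles (k3s_server, k3s_agent, k3s_server_post) now resolve the first control-plane host through `groups[group_name_master | default('master')]`, so the inventory group no longer has to be literally named `master`. Below is a minimal sketch of how an inventory could use this; the group name `control_plane`, the host addresses, and the file paths are hypothetical illustrations, not part of this diff.

# inventory/my-cluster/hosts.ini  (hypothetical inventory layout)
[control_plane]
192.168.30.38
192.168.30.39
192.168.30.40

[node]
192.168.30.41

[k3s_cluster:children]
control_plane
node

# inventory/my-cluster/group_vars/all.yml  (hypothetical override)
# The roles fall back to 'master' when this is unset, matching the default
# shipped in roles/k3s_server/defaults/main.yml above.
group_name_master: control_plane

The plays in site.yml still target `hosts: master` and `hosts: node`, so a renamed group would also need the play host patterns adjusted; `group_name_master` only affects the group lookups inside the roles and templates.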