diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index c25f6cf4..5ca606fd 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -71,6 +71,7 @@ jobs:
 
       - name: Test with molecule
         run: molecule test --scenario-name ${{ matrix.scenario }}
+        timeout-minutes: 90
         env:
           ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
           ANSIBLE_SSH_RETRIES: 4
diff --git a/README.md b/README.md
index e6df6841..ceff956d 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,7 @@ on processor architecture:
 
 ## ✅ System requirements
 
-- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- Control Node (the machine you are running `ansible` commands from) must have Ansible 2.11+. If you need a quick primer on Ansible [you can check out my docs on setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
 
 - You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
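
For anyone verifying the updated requirement locally, a quick pre-flight check on the control node could look like this (a sketch; the `ansible-galaxy` command is the one already quoted in the README, and the exact version output format varies between Ansible releases):

    ansible --version   # should report ansible [core 2.11] or newer
    ansible-galaxy collection install -r ./collections/requirements.yml
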
diff --git a/molecule/ipv6/host_vars/control2.yml b/molecule/ipv6/host_vars/control2.yml
new file mode 100644
index 00000000..97fbc81a
--- /dev/null
+++ b/molecule/ipv6/host_vars/control2.yml
@@ -0,0 +1,3 @@
+---
+node_ipv4: 192.168.123.12
+node_ipv6: fdad:bad:ba55::de:12
diff --git a/molecule/ipv6/molecule.yml b/molecule/ipv6/molecule.yml
index d6935cb7..2ad64234 100644
--- a/molecule/ipv6/molecule.yml
+++ b/molecule/ipv6/molecule.yml
@@ -4,7 +4,6 @@ dependency:
 driver:
   name: vagrant
 platforms:
-
   - name: control1
     box: generic/ubuntu2204
     memory: 2048
@@ -21,6 +20,22 @@ platforms:
       ssh.username: "vagrant"
       ssh.password: "vagrant"
 
+  - name: control2
+    box: generic/ubuntu2204
+    memory: 2048
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: fdad:bad:ba55::de:12
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+
   - name: node1
     box: generic/ubuntu2204
     memory: 2048
diff --git a/molecule/ipv6/overrides.yml b/molecule/ipv6/overrides.yml
index fea629ea..d701d24c 100644
--- a/molecule/ipv6/overrides.yml
+++ b/molecule/ipv6/overrides.yml
@@ -7,6 +7,11 @@
         # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
         flannel_iface: eth1
 
+        # In this scenario, we have multiple interfaces that the VIP could be
+        # broadcasted on. Since we have assigned a dedicated private network
+        # here, let's make sure that it is used.
+        kube_vip_iface: eth1
+
         # The test VMs might be a bit slow, so we give them more time to join the cluster:
         retry_count: 45
diff --git a/requirements.txt b/requirements.txt
index 297c4fa7..1027404d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,15 +5,13 @@
 #    pip-compile requirements.in
 #
 ansible-compat==3.0.1
-    # via
-    #   ansible-lint
-    #   molecule
-ansible-core==2.14.2
+    # via molecule
+ansible-core==2.14.3
     # via
     #   -r requirements.in
     #   ansible-compat
     #   ansible-lint
-ansible-lint==6.12.1
+ansible-lint==6.14.2
     # via -r requirements.in
 arrow==1.2.3
     # via jinja2-time
@@ -189,7 +187,9 @@ six==1.16.0
     #   kubernetes
     #   python-dateutil
 subprocess-tee==0.4.1
-    # via ansible-compat
+    # via
+    #   ansible-compat
+    #   ansible-lint
 text-unidecode==1.3
     # via python-slugify
 urllib3==1.26.12
diff --git a/reset.yml b/reset.yml
index 18846bb9..2cf6efbd 100644
--- a/reset.yml
+++ b/reset.yml
@@ -13,3 +13,11 @@
       become: true
       reboot:
         reboot_timeout: 3600
+
+- hosts: proxmox
+  gather_facts: true
+  become: yes
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: reset_proxmox_lxc
+      when: proxmox_lxc_configure
diff --git a/roles/k3s/master/defaults/main.yml b/roles/k3s/master/defaults/main.yml
index 24e4a6a9..9e2fe633 100644
--- a/roles/k3s/master/defaults/main.yml
+++ b/roles/k3s/master/defaults/main.yml
@@ -1,10 +1,15 @@
 ---
+# If you want to explicitly define an interface that ALL control nodes
+# should use to propagate the VIP, define it here. Otherwise, kube-vip
+# will determine the right interface automatically at runtime.
+kube_vip_iface: null
+
 server_init_args: >-
   {% if groups['master'] | length > 1 %}
     {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
       --cluster-init
     {% else %}
-      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
+      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
     {% endif %}
     --token {{ k3s_token }}
   {% endif %}
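
Both changes above are opt-in from the user's point of view. As a rough sketch (variable names come from this diff, but the file location and addresses are only illustrative), a cluster that wants to pin the VIP to one NIC and join control nodes over IPv6 might set, in inventory group_vars/host_vars:

    kube_vip_iface: eth1          # force kube-vip onto this interface on every control node; leave it unset (null) to let kube-vip auto-detect
    k3s_node_ip: "fd00:db8::11"   # with split + ipwrap above, only the first address of a dual-stack value is used and the join URL renders as https://[fd00:db8::11]:6443
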
diff --git a/roles/k3s/master/tasks/metallb.yml b/roles/k3s/master/tasks/metallb.yml
index fa1089fb..917b4a82 100644
--- a/roles/k3s/master/tasks/metallb.yml
+++ b/roles/k3s/master/tasks/metallb.yml
@@ -8,25 +8,16 @@
     mode: 0644
   when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
 
-- name: Download metallb manifest to first master
+- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
   ansible.builtin.get_url:
-    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-native.yaml"  # noqa yaml[line-length]
+    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml"  # noqa yaml[line-length]
     dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
     owner: root
     group: root
     mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] and metal_lb_type == "native"
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
 
-- name: Download metallb-frr manifest to first master
-  ansible.builtin.get_url:
-    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-frr.yaml"  # noqa yaml[line-length]
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-frr-crds.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] and metal_lb_type == "frr"
-
-- name: Set image versions for metallb manifest
+- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
   ansible.builtin.replace:
     path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
     regexp: "{{ item.change | ansible.builtin.regex_escape }}"
@@ -36,16 +27,4 @@
       to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
   loop_control:
     label: "{{ item.change }} => {{ item.to }}"
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] and metal_lb_type == "native"
-
-- name: Set image versions for metallb-frr manifest
-  ansible.builtin.replace:
-    path: "/var/lib/rancher/k3s/server/manifests/metallb-frr-crds.yaml"
-    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
-    replace: "{{ item.to }}"
-  with_items:
-    - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
-      to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
-  loop_control:
-    label: "{{ item.change }} => {{ item.to }}"
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] and metal_lb_type == "frr"
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
diff --git a/roles/k3s/master/templates/vip.yaml.j2 b/roles/k3s/master/templates/vip.yaml.j2
index 2629398d..862aee64 100644
--- a/roles/k3s/master/templates/vip.yaml.j2
+++ b/roles/k3s/master/templates/vip.yaml.j2
@@ -30,8 +30,10 @@ spec:
           value: "true"
         - name: port
          value: "6443"
+{% if kube_vip_iface %}
         - name: vip_interface
-          value: {{ flannel_iface }}
+          value: {{ kube_vip_iface }}
+{% endif %}
         - name: vip_cidr
           value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
         - name: cp_enable
diff --git a/roles/lxc/handlers/main.yml b/roles/lxc/handlers/main.yml
index 1bba5c88..20013cc5 100644
--- a/roles/lxc/handlers/main.yml
+++ b/roles/lxc/handlers/main.yml
@@ -1,3 +1,4 @@
 ---
 - name: reboot server
+  become: true
   reboot:
diff --git a/roles/lxc/tasks/main.yml b/roles/lxc/tasks/main.yml
index d47200a5..5a792a4f 100644
--- a/roles/lxc/tasks/main.yml
+++ b/roles/lxc/tasks/main.yml
@@ -1,7 +1,21 @@
 ---
-- name: configure rc.local for proxmox lxc containers
-  copy:
-    src: "{{ playbook_dir }}/scripts/rc.local"
-    dest: "/etc/rc.local"
+- name: Check for rc.local file
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Create rc.local if needed
+  lineinfile:
+    path: /etc/rc.local
+    line: "#!/bin/sh -e"
+    create: true
+    insertbefore: BOF
     mode: "u=rwx,g=rx,o=rx"
+  when: not rcfile.stat.exists
+
+- name: Write rc.local file
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    state: present
   notify: reboot server
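
Because `blockinfile` (unlike the previous `copy`) wraps the template output in marker comments, the managed `/etc/rc.local` on an LXC container should end up looking roughly like the sketch below (default Ansible block markers; the body comes from `templates/rc.local.j2` and is mostly elided here):

    #!/bin/sh -e
    # BEGIN ANSIBLE MANAGED BLOCK
    # Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
    # see: https://github.com/kubernetes-sigs/kind/issues/662
    if [ ! -e /dev/kmsg ]; then
        # ... remainder of templates/rc.local.j2 ...
    fi
    # END ANSIBLE MANAGED BLOCK
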
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 537839d4..b9cdd6ed 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -54,3 +54,31 @@
   file:
     path: /tmp/k3s
     state: absent
+
+- name: Check if rc.local exists
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Remove rc.local modifications for proxmox lxc containers
+  become: true
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    create: false
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Check rc.local for cleanup
+  become: true
+  slurp:
+    src: /etc/rc.local
+  register: rcslurp
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Cleanup rc.local if we only have a shebang line
+  become: true
+  file:
+    path: /etc/rc.local
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1
diff --git a/roles/reset_proxmox_lxc/handlers/main.yml b/roles/reset_proxmox_lxc/handlers/main.yml
new file mode 100644
index 00000000..9b99cb2f
--- /dev/null
+++ b/roles/reset_proxmox_lxc/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: reboot containers
+  command:
+    "pct reboot {{ item }}"
+  loop: "{{ proxmox_lxc_filtered_ids }}"
diff --git a/roles/reset_proxmox_lxc/tasks/main.yml b/roles/reset_proxmox_lxc/tasks/main.yml
new file mode 100644
index 00000000..d9f402d9
--- /dev/null
+++ b/roles/reset_proxmox_lxc/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+- name: check for container files that exist on this host
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
+  loop: "{{ proxmox_lxc_ct_ids }}"
+  register: stat_results
+
+- name: filter out files that do not exist
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
+
+# used for the reboot handler
+- name: get container ids from filtered files
+  set_fact:
+    proxmox_lxc_filtered_ids:
+      '{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
+
+- name: Remove LXC apparmor profile
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.apparmor.profile"
+    line: "lxc.apparmor.profile: unconfined"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cgroups
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cgroup.devices.allow"
+    line: "lxc.cgroup.devices.allow: a"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cap drop
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cap.drop"
+    line: "lxc.cap.drop: "
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc mounts
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.mount.auto"
+    line: 'lxc.mount.auto: "proc:rw sys:rw"'
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
diff --git a/site.yml b/site.yml
index f6f0b092..d8e4325f 100644
--- a/site.yml
+++ b/site.yml
@@ -12,6 +12,7 @@
   gather_facts: yes
   roles:
     - role: lxc
+      become: true
       when: proxmox_lxc_configure
     - role: prereq
       become: true
diff --git a/scripts/rc.local b/templates/rc.local.j2
similarity index 95%
rename from scripts/rc.local
rename to templates/rc.local.j2
index daa6c778..16ca666e 100644
--- a/scripts/rc.local
+++ b/templates/rc.local.j2
@@ -1,5 +1,3 @@
-#!/bin/sh -e
-
 # Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
 # see: https://github.com/kubernetes-sigs/kind/issues/662
 if [ ! -e /dev/kmsg ]; then
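
For anyone wiring this up outside of CI: the new `- hosts: proxmox` play and the `reset_proxmox_lxc` role only rely on variables that already appear in this diff (`proxmox_lxc_configure`, `proxmox_lxc_ssh_user`, `proxmox_lxc_ct_ids`). A minimal inventory sketch, with purely illustrative host names, file locations, and container IDs, could look like:

    # inventory file (e.g. hosts.ini), illustrative
    [proxmox]
    pve01.example.com

    # group variables (e.g. group_vars/proxmox.yml), illustrative values
    proxmox_lxc_configure: true
    proxmox_lxc_ssh_user: root
    proxmox_lxc_ct_ids:
      - 200
      - 201

Running `ansible-playbook reset.yml -i <your-inventory>` would then also strip the LXC-specific entries from `/etc/pve/lxc/<id>.conf` on the Proxmox host and reboot the affected containers via the `pct reboot` handler.
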