forked from tim/k3s-ansible
Merge branch 'master' into k3s-1-26

.github/workflows/test.yml (vendored, 1 change)
@@ -71,6 +71,7 @@ jobs:
       - name: Test with molecule
         run: molecule test --scenario-name ${{ matrix.scenario }}
         timeout-minutes: 90
         env:
           ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
+          ANSIBLE_SSH_RETRIES: 4

README.md

@@ -28,7 +28,7 @@ on processor architecture:
 
 ## ✅ System requirements
 
-- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- Control Node (the machine you are running `ansible` commands from) must have Ansible 2.11+. If you need a quick primer on Ansible [you can check out my docs on setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
 
 - You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)

molecule/ipv6/host_vars/control2.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
+---
+node_ipv4: 192.168.123.12
+node_ipv6: fdad:bad:ba55::de:12

molecule/ipv6/molecule.yml

@@ -4,7 +4,6 @@ dependency:
 driver:
   name: vagrant
 platforms:
-
   - name: control1
     box: generic/ubuntu2204
     memory: 2048

@@ -21,6 +20,22 @@ platforms:
       ssh.username: "vagrant"
       ssh.password: "vagrant"
 
+  - name: control2
+    box: generic/ubuntu2204
+    memory: 2048
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: fdad:bad:ba55::de:12
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+
   - name: node1
     box: generic/ubuntu2204
     memory: 2048

@@ -7,6 +7,11 @@
 # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
 flannel_iface: eth1
 
+# In this scenario, we have multiple interfaces that the VIP could be
+# broadcasted on. Since we have assigned a dedicated private network
+# here, let's make sure that it is used.
+kube_vip_iface: eth1
+
 # The test VMs might be a bit slow, so we give them more time to join the cluster:
 retry_count: 45
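
`retry_count` presumably feeds the roles' `until`/`retries` wait loops. A hypothetical consumer, sketched here for illustration and not copied from the repo's roles:

- name: Wait for the node to join the cluster (hypothetical consumer of retry_count)
  ansible.builtin.command: k3s kubectl get nodes -o name
  register: nodes
  until: nodes.rc == 0
  retries: "{{ retry_count }}"
  delay: 10
  changed_when: false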

requirements.txt

@@ -5,15 +5,13 @@
 #    pip-compile requirements.in
 #
 ansible-compat==3.0.1
     # via
     #   ansible-lint
     #   molecule
-ansible-core==2.14.2
-    # via molecule
+ansible-core==2.14.3
+    # via
+    #   -r requirements.in
+    #   ansible-compat
+    #   ansible-lint
-ansible-lint==6.12.1
+ansible-lint==6.14.2
     # via -r requirements.in
 arrow==1.2.3
     # via jinja2-time

@@ -189,7 +187,9 @@ six==1.16.0
     #   kubernetes
     #   python-dateutil
 subprocess-tee==0.4.1
-    # via ansible-compat
+    # via
+    #   ansible-compat
+    #   ansible-lint
 text-unidecode==1.3
     # via python-slugify
 urllib3==1.26.12

reset.yml

@@ -13,3 +13,11 @@
     become: true
     reboot:
       reboot_timeout: 3600
+
+- hosts: proxmox
+  gather_facts: true
+  become: yes
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: reset_proxmox_lxc
+      when: proxmox_lxc_configure
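
The new play only does anything when `proxmox_lxc_configure` is set, and it connects to the Proxmox hosts as `proxmox_lxc_ssh_user`. A minimal inventory sketch showing where those variables could live (host names, IDs, and values are illustrative, not from the repo):

all:
  vars:
    proxmox_lxc_configure: true
    proxmox_lxc_ssh_user: root           # user with access to /etc/pve/lxc
    proxmox_lxc_ct_ids: [201, 202, 203]  # containers the reset role may touch
  children:
    proxmox:
      hosts:
        pve1.example.com: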

@@ -1,10 +1,15 @@
 ---
+# If you want to explicitly define an interface that ALL control nodes
+# should use to propagate the VIP, define it here. Otherwise, kube-vip
+# will determine the right interface automatically at runtime.
+kube_vip_iface: null
+
 server_init_args: >-
   {% if groups['master'] | length > 1 %}
     {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
       --cluster-init
     {% else %}
-      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
+      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
     {% endif %}
     --token {{ k3s_token }}
   {% endif %}
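
The reworked `--server` expression is what makes dual-stack and IPv6 control planes work: `k3s_node_ip` may carry a comma-separated `ipv4,ipv6` pair, `split(",") | first` picks the first address, and `ansible.utils.ipwrap` brackets IPv6 literals so the resulting URL stays valid. A throwaway task to see the rendering (the addresses are examples, not repo defaults):

- name: Show how ipwrap renders an IPv6 server URL (illustration only)
  ansible.builtin.debug:
    msg: "--server https://{{ 'fdad:bad:ba55::de:11,192.168.123.11' | split(',') | first | ansible.utils.ipwrap }}:6443"
  # prints: --server https://[fdad:bad:ba55::de:11]:6443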

@@ -8,25 +8,16 @@
     mode: 0644
   when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
 
-- name: Download metallb manifest to first master
+- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
   ansible.builtin.get_url:
-    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-native.yaml" # noqa yaml[line-length]
+    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml" # noqa yaml[line-length]
     dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
     owner: root
     group: root
     mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] and metal_lb_type == "native"
-
-- name: Download metallb-frr manifest to first master
-  ansible.builtin.get_url:
-    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-frr.yaml" # noqa yaml[line-length]
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-frr-crds.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] and metal_lb_type == "frr"
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
 
-- name: Set image versions for metallb manifest
+- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
   ansible.builtin.replace:
     path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
     regexp: "{{ item.change | ansible.builtin.regex_escape }}"

@@ -36,16 +27,4 @@
       to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
   loop_control:
     label: "{{ item.change }} => {{ item.to }}"
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] and metal_lb_type == "native"
-
-- name: Set image versions for metallb-frr manifest
-  ansible.builtin.replace:
-    path: "/var/lib/rancher/k3s/server/manifests/metallb-frr-crds.yaml"
-    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
-    replace: "{{ item.to }}"
-    with_items:
-      - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
-        to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
-    loop_control:
-      label: "{{ item.change }} => {{ item.to }}"
-    when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] and metal_lb_type == "frr"
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
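
Folding the `native` and `frr` variants into one set of tasks removes the duplicated download/replace logic: the manifest URL, the task names, and the conditions are all driven by `metal_lb_type`. A quick way to sanity-check the templated URL (the tag value here is hypothetical):

- name: Show the manifest URL for the selected MetalLB flavor (illustration only)
  ansible.builtin.debug:
    msg: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml"
  vars:
    metal_lb_type: frr                        # or "native"
    metal_lb_controller_tag_version: v0.13.9  # hypothetical tag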

@@ -30,8 +30,10 @@ spec:
           value: "true"
         - name: port
           value: "6443"
+        {% if kube_vip_iface %}
         - name: vip_interface
-          value: {{ flannel_iface }}
+          value: {{ kube_vip_iface }}
+        {% endif %}
         - name: vip_cidr
           value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
        - name: cp_enable
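
Since `null` is falsy in Jinja, the guard drops the `vip_interface` env var entirely unless the user sets `kube_vip_iface`, and kube-vip then auto-detects the interface at runtime; previously the template hard-wired `flannel_iface`, which was wrong whenever the VIP should ride a different interface. A sketch of the two renderings, assuming the molecule override `kube_vip_iface: eth1`:

# kube_vip_iface: eth1  =>  the kube-vip container env contains:
#   - name: vip_interface
#     value: eth1
# kube_vip_iface: null  =>  no vip_interface entry; kube-vip picks the interface.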

@@ -1,3 +1,4 @@
 ---
 - name: reboot server
+  become: true
   reboot:

@@ -1,7 +1,21 @@
 ---
-- name: configure rc.local for proxmox lxc containers
-  copy:
-    src: "{{ playbook_dir }}/scripts/rc.local"
-    dest: "/etc/rc.local"
+- name: Check for rc.local file
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Create rc.local if needed
+  lineinfile:
+    path: /etc/rc.local
+    line: "#!/bin/sh -e"
+    create: true
+    insertbefore: BOF
+    mode: "u=rwx,g=rx,o=rx"
+  when: not rcfile.stat.exists
+
+- name: Write rc.local file
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    state: present
   notify: reboot server
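
The switch from `copy` to `lineinfile` + `blockinfile` is what makes the reset path possible: `lineinfile` guarantees the `#!/bin/sh -e` shebang stays on the first line, while `blockinfile` fences the template output between marker comments that can later be removed as a unit. The same call with the marker spelled out (this is just `blockinfile`'s default marker, shown for clarity):

- name: Write rc.local file
  blockinfile:
    path: /etc/rc.local
    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
    marker: "# {mark} ANSIBLE MANAGED BLOCK"  # default; {mark} becomes BEGIN/END
    state: present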

@@ -54,3 +54,31 @@
   file:
     path: /tmp/k3s
     state: absent
+
+- name: Check if rc.local exists
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Remove rc.local modifications for proxmox lxc containers
+  become: true
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    create: false
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Check rc.local for cleanup
+  become: true
+  slurp:
+    src: /etc/rc.local
+  register: rcslurp
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Cleanup rc.local if we only have a shebang line
+  become: true
+  file:
+    path: /etc/rc.local
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1
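
`slurp` hands back the file contents base64-encoded, so the cleanup condition decodes before counting lines: once `blockinfile` has removed its managed block, only the `#!/bin/sh -e` line is left and the file can be deleted. A throwaway check of the same expression (the encoded content is an example):

- name: Count remaining rc.local lines (illustration only)
  ansible.builtin.debug:
    msg: "{{ ('IyEvYmluL3NoIC1lCg==' | b64decode).splitlines() | length }}"
  # 'IyEvYmluL3NoIC1lCg==' is base64 for "#!/bin/sh -e\n"  =>  prints 1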

roles/reset_proxmox_lxc/handlers/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+- name: reboot containers
+  command:
+    "pct reboot {{ item }}"
+  loop: "{{ proxmox_lxc_filtered_ids }}"

roles/reset_proxmox_lxc/tasks/main.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
+---
+- name: check for container files that exist on this host
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
+  loop: "{{ proxmox_lxc_ct_ids }}"
+  register: stat_results
+
+- name: filter out files that do not exist
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
+
+# used for the reboot handler
+- name: get container ids from filtered files
+  set_fact:
+    proxmox_lxc_filtered_ids:
+      '{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
+
+- name: Remove LXC apparmor profile
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.apparmor.profile"
+    line: "lxc.apparmor.profile: unconfined"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cgroups
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cgroup.devices.allow"
+    line: "lxc.cgroup.devices.allow: a"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cap drop
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cap.drop"
+    line: "lxc.cap.drop: "
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc mounts
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.mount.auto"
+    line: 'lxc.mount.auto: "proc:rw sys:rw"'
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
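
The two `set_fact` chains do all the bookkeeping: `rejectattr("stat.exists", "false")` keeps only the config files that actually exist on this Proxmox node, and the container IDs are recovered purely by string surgery on the paths. A worked example of the extraction (the path is hypothetical):

- name: Show container id extraction (illustration only)
  ansible.builtin.debug:
    msg: "{{ '/etc/pve/lxc/201.conf' | split('/') | last | split('.') | first }}"
  # '/etc/pve/lxc/201.conf' -> '201.conf' -> '201'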

site.yml (1 change)
@@ -12,6 +12,7 @@
     gather_facts: yes
     roles:
       - role: lxc
+        become: true
         when: proxmox_lxc_configure
       - role: prereq
         become: true

@@ -1,5 +1,3 @@
-#!/bin/sh -e
-
 # Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
 # see: https://github.com/kubernetes-sigs/kind/issues/662
 if [ ! -e /dev/kmsg ]; then