Mirror of https://github.com/techno-tim/k3s-ansible.git
Synced 2025-12-25 18:23:05 +01:00

Compare commits: v1.24.9+k3s1 ... v1.24.11+k3s1 (24 commits)
Commits (SHA1; author and date columns were not captured by the mirror):
3e16ab6809
83fe50797c
2db0b3024c
6b2af77e74
d1d1bc3d91
3a1a7a19aa
030eeb4b75
4aeeb124ef
511c020bec
c47da38b53
6448948e9f
7bc198ab26
65bbc8e2ac
dc2976e7f6
5a7ba98968
10c6ef1d57
ed4d888e3d
49d6d484ae
96c49c864e
60adb1de42
e023808f2f
511ec493d6
be3e72e173
e33cbe52c1

.github/workflows/test.yml (vendored, 1 change)
@@ -71,6 +71,7 @@ jobs:
       - name: Test with molecule
         run: molecule test --scenario-name ${{ matrix.scenario }}
         timeout-minutes: 90
         env:
           ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
           ANSIBLE_SSH_RETRIES: 4
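For local debugging it can help to mirror the CI invocation. A minimal sketch, assuming the `ipv6` scenario that appears later in this diff; the log path is arbitrary:

#!/bin/bash
# Sketch: run one molecule scenario roughly the way CI does (paths are illustrative).
export ANSIBLE_K3S_LOG_DIR=/tmp/logs/k3s-ansible/ipv6
export ANSIBLE_SSH_RETRIES=4
molecule test --scenario-name ipv6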

.gitignore (vendored, 1 change)
@@ -1,2 +1,3 @@
 .env/
 *.log
+ansible.cfg

@@ -28,7 +28,7 @@ on processor architecture:
 ## ✅ System requirements

-- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).

 - You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)

@@ -67,6 +67,8 @@ node
 If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).

+Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
+
 This requires at least k3s version `1.19.1` however the version is configurable by using the `k3s_version` variable.

 If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.


ansible.cfg (23 changes, file deleted)
@@ -1,23 +0,0 @@
-[defaults]
-nocows = True
-roles_path = ./roles
-inventory = ./hosts.ini
-stdout_callback = yaml
-
-remote_tmp = $HOME/.ansible/tmp
-local_tmp = $HOME/.ansible/tmp
-timeout = 60
-host_key_checking = False
-deprecation_warnings = False
-callbacks_enabled = profile_tasks
-log_path = ./ansible.log
-
-[privilege_escalation]
-become = True
-
-[ssh_connection]
-scp_if_ssh = smart
-retries = 3
-ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s
-pipelining = True
-control_path = %(directory)s/%%h-%%r

ansible.example.cfg (new file, 2 lines)
@@ -0,0 +1,2 @@
+[defaults]
+inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file
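Since `ansible.cfg` is now gitignored and only the two-line example ships with the repo, any of the deleted defaults above can be carried over into a private copy. A minimal sketch; every setting besides `inventory` is optional and is taken verbatim from the deleted file:

[defaults]
inventory = inventory/my-cluster/hosts.ini
; Optional extras carried over from the deleted ansible.cfg:
stdout_callback = yaml
host_key_checking = False

[privilege_escalation]
become = True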
@@ -1,3 +1,3 @@
 #!/bin/bash

-ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook site.yml

@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.24.9+k3s1
+k3s_version: v1.24.11+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
@@ -41,11 +41,44 @@ extra_agent_args: >-
   {{ extra_args }}

 # image tag for kube-vip
-kube_vip_tag_version: "v0.5.7"
+kube_vip_tag_version: "v0.5.11"

+# metallb type frr or native
+metal_lb_type: "native"
+
+# metallb mode layer2 or bgp
+metal_lb_mode: "layer2"
+
+# bgp options
+# metal_lb_bgp_my_asn: "64513"
+# metal_lb_bgp_peer_asn: "64512"
+# metal_lb_bgp_peer_address: "192.168.30.1"

 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.13.7"
-metal_lb_controller_tag_version: "v0.13.7"
+metal_lb_frr_tag_version: "v7.5.1"
+metal_lb_speaker_tag_version: "v0.13.9"
+metal_lb_controller_tag_version: "v0.13.9"

 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"

+# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
+# in your hosts.ini file.
+# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
+# Most notably, your containers must be privileged, and must not have nesting set to true.
+# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
+# containers are significantly more resource efficent compared to full VMs.
+# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
+# I would only really recommend using this if you have partiularly low powered proxmox nodes where the overhead of
+# VMs would use a significant portion of your available resources.
+proxmox_lxc_configure: false
+# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
+# set this value to some-user
+proxmox_lxc_ssh_user: root
+# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes
+proxmox_lxc_ct_ids:
+  - 200
+  - 201
+  - 202
+  - 203
+  - 204
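With the new knobs above, switching MetalLB to BGP mode is a matter of flipping `metal_lb_mode` and uncommenting the peer settings. A hedged sketch using the commented sample values; your router's ASNs and address will differ:

metal_lb_type: "native"
metal_lb_mode: "bgp"
metal_lb_bgp_my_asn: "64513"
metal_lb_bgp_peer_asn: "64512"
metal_lb_bgp_peer_address: "192.168.30.1"
metal_lb_ip_range: "192.168.30.80-192.168.30.90"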
@@ -7,6 +7,11 @@
 192.168.30.41
 192.168.30.42

+# only required if proxmox_lxc_configure: true
+# must contain all proxmox instances that have a master or worker node
+# [proxmox]
+# 192.168.30.43

 [k3s_cluster:children]
 master
 node
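If `proxmox_lxc_configure` is enabled, the inventory gains an uncommented `[proxmox]` group alongside the existing ones. A hypothetical sketch of a full hosts.ini; the `[master]` address is invented for illustration, only the node and proxmox addresses appear in this diff:

[master]
192.168.30.40

[node]
192.168.30.41
192.168.30.42

[proxmox]
192.168.30.43

[k3s_cluster:children]
master
node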

molecule/ipv6/host_vars/control2.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
+---
+node_ipv4: 192.168.123.12
+node_ipv6: fdad:bad:ba55::de:12
@@ -4,7 +4,6 @@ dependency:
 driver:
   name: vagrant
 platforms:
-
   - name: control1
     box: generic/ubuntu2204
     memory: 2048
@@ -21,6 +20,22 @@ platforms:
         ssh.username: "vagrant"
         ssh.password: "vagrant"

+  - name: control2
+    box: generic/ubuntu2204
+    memory: 2048
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: fdad:bad:ba55::de:12
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+
   - name: node1
     box: generic/ubuntu2204
     memory: 2048
@@ -7,6 +7,11 @@
 # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
 flannel_iface: eth1

+# In this scenario, we have multiple interfaces that the VIP could be
+# broadcasted on. Since we have assigned a dedicated private network
+# here, let's make sure that it is used.
+kube_vip_iface: eth1

 # The test VMs might be a bit slow, so we give them more time to join the cluster:
 retry_count: 45

@@ -1,3 +1,3 @@
 #!/bin/bash

-ansible-playbook reboot.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook reboot.yml

@@ -2,8 +2,8 @@
 - name: Reboot k3s_cluster
   hosts: k3s_cluster
   gather_facts: yes
+  become: yes
   tasks:
     - name: Reboot the nodes (and Wait upto 5 mins max)
-      become: true
       reboot:
         reboot_timeout: 300

@@ -4,15 +4,14 @@
 #
 # pip-compile requirements.in
 #
-ansible-compat==2.2.4
-    # via
-    #   ansible-lint
-    #   molecule
-ansible-core==2.14.1
+ansible-compat==3.0.1
+    # via molecule
+ansible-core==2.14.3
     # via
     #   -r requirements.in
     #   ansible-compat
     #   ansible-lint
-ansible-lint==6.8.6
+ansible-lint==6.14.2
     # via -r requirements.in
 arrow==1.2.3
     # via jinja2-time
@@ -187,8 +186,10 @@ six==1.16.0
     #   google-auth
     #   kubernetes
     #   python-dateutil
-subprocess-tee==0.3.5
-    # via ansible-compat
+subprocess-tee==0.4.1
+    # via
+    #   ansible-compat
+    #   ansible-lint
 text-unidecode==1.3
     # via python-slugify
 urllib3==1.26.12

reset.sh (2 changes)
@@ -1,3 +1,3 @@
 #!/bin/bash

-ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook reset.yml


reset.yml (12 changes)
@@ -2,12 +2,22 @@
 - hosts: k3s_cluster
   gather_facts: yes
-  become: yes
   roles:
     - role: reset
+      become: true
     - role: raspberrypi
+      become: true
       vars: {state: absent}
   post_tasks:
     - name: Reboot and wait for node to come back up
+      become: true
       reboot:
         reboot_timeout: 3600

+- hosts: proxmox
+  gather_facts: true
+  become: yes
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: reset_proxmox_lxc
+      when: proxmox_lxc_configure

@@ -1,11 +1,15 @@
 ---
 ansible_user: root
+# If you want to explicitly define an interface that ALL control nodes
+# should use to propagate the VIP, define it here. Otherwise, kube-vip
+# will determine the right interface automatically at runtime.
+kube_vip_iface: null

 server_init_args: >-
   {% if groups['master'] | length > 1 %}
     {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
       --cluster-init
     {% else %}
-      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
+      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
     {% endif %}
     --token {{ k3s_token }}
   {% endif %}

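The new filter chain handles both a dual-stack (comma-separated) `k3s_node_ip` and an IPv6-only one, which must be bracketed inside a URL. A worked sketch of what the expression yields; the addresses are illustrative, borrowed from the ipv6 molecule scenario:

# k3s_node_ip: "192.168.123.11"
#   split(",") | first | ipwrap  ->  192.168.123.11
#   rendered flag                ->  --server https://192.168.123.11:6443
#
# k3s_node_ip: "192.168.123.11,fdad:bad:ba55::de:11"
#   split(",") | first | ipwrap  ->  192.168.123.11   (the first listed address wins)
#
# k3s_node_ip: "fdad:bad:ba55::de:11"
#   split(",") | first | ipwrap  ->  [fdad:bad:ba55::de:11]
#   rendered flag                ->  --server https://[fdad:bad:ba55::de:11]:6443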
@@ -97,24 +97,24 @@
 - name: Create directory .kube
   file:
-    path: ~{{ ansible_user }}/.kube
+    path: "{{ ansible_user_dir }}/.kube"
     state: directory
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: "u=rwx,g=rx,o="

 - name: Copy config file to user home directory
   copy:
     src: /etc/rancher/k3s/k3s.yaml
-    dest: ~{{ ansible_user }}/.kube/config
+    dest: "{{ ansible_user_dir }}/.kube/config"
     remote_src: yes
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: "u=rw,g=,o="

 - name: Configure kubectl cluster to {{ endpoint_url }}
   command: >-
     k3s kubectl config set-cluster default
-      --server={{ endpoint_url }} --kubeconfig ~{{ ansible_user }}/.kube/config
+      --server={{ endpoint_url }} --kubeconfig {{ ansible_user_dir }}/.kube/config
   changed_when: true
   vars:
     endpoint_url: >-
@@ -8,20 +8,23 @@
     mode: 0644
   when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

 - name: Copy metallb namespace to first master
   template:
     src: "metallb.namespace.j2"
     dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
     owner: root
     group: root
     mode: 0644
   when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

-- name: Copy metallb manifest to first master
-  template:
-    src: "metallb.crds.j2"
+- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
+  ansible.builtin.get_url:
+    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml"  # noqa yaml[line-length]
     dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
     owner: root
     group: root
     mode: 0644
   when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

+- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
+  ansible.builtin.replace:
+    path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
+    replace: "{{ item.to }}"
+  with_items:
+    - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
+      to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
+  loop_control:
+    label: "{{ item.change }} => {{ item.to }}"
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

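The downloaded manifest pins every image to `metal_lb_controller_tag_version`, so the replace step rewrites only the speaker image to its own tag. A hedged sketch of the effect; with this range's defaults both tags are v0.13.9, so the divergent speaker tag below is hypothetical:

# Manifest line as downloaded (pinned to metal_lb_controller_tag_version):
#   image: quay.io/metallb/speaker:v0.13.9
# Same line after the replace task, assuming a hypothetical
# metal_lb_speaker_tag_version of v0.13.10:
#   image: quay.io/metallb/speaker:v0.13.10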
@@ -8,9 +8,9 @@
     mode: 0644
   when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

-- name: Copy vip rbac manifest to first master
-  template:
-    src: "vip.rbac.yaml.j2"
+- name: Download vip rbac manifest to first master
+  ansible.builtin.get_url:
+    url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
     dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
     owner: root
     group: root

(One file's diff was suppressed by the mirror because it is too large.)
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: metallb-system
-  labels:
-    app: metallb
@@ -1,32 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-vip
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  annotations:
-    rbac.authorization.kubernetes.io/autoupdate: "true"
-  name: system:kube-vip-role
-rules:
-  - apiGroups: [""]
-    resources: ["services", "services/status", "nodes", "endpoints"]
-    verbs: ["list","get","watch", "update"]
-  - apiGroups: ["coordination.k8s.io"]
-    resources: ["leases"]
-    verbs: ["list", "get", "watch", "update", "create"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: system:kube-vip-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:kube-vip-role
-subjects:
-  - kind: ServiceAccount
-    name: kube-vip
-    namespace: kube-system
@@ -30,8 +30,10 @@ spec:
         value: "true"
       - name: port
         value: "6443"
+{% if kube_vip_iface %}
       - name: vip_interface
-        value: {{ flannel_iface }}
+        value: {{ kube_vip_iface }}
+{% endif %}
       - name: vip_cidr
         value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
       - name: cp_enable
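With the ipv6 scenario's `kube_vip_iface: eth1`, the guarded block renders to a fixed env entry in the kube-vip manifest; a sketch of the rendered fragment, assuming that value:

      - name: vip_interface
        value: eth1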
@@ -3,7 +3,7 @@
   file:
     path: /tmp/k3s
     state: directory
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: 0755
   with_items: "{{ groups['master'] }}"
   run_once: true
@@ -12,7 +12,7 @@
   template:
     src: "metallb.crs.j2"
     dest: "/tmp/k3s/metallb-crs.yaml"
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: 0755
   with_items: "{{ groups['master'] }}"
   run_once: true
@@ -79,11 +79,23 @@
   until: this.rc == 0
   retries: 5

-- name: Test metallb-system resources
+- name: Test metallb-system resources for Layer 2 configuration
   command: >-
     k3s kubectl -n metallb-system get {{ item }}
   changed_when: false
   run_once: true
+  when: metal_lb_mode == "layer2"
   with_items:
     - IPAddressPool
     - L2Advertisement

+- name: Test metallb-system resources for BGP configuration
+  command: >-
+    k3s kubectl -n metallb-system get {{ item }}
+  changed_when: false
+  run_once: true
+  when: metal_lb_mode == "bgp"
+  with_items:
+    - IPAddressPool
+    - BGPPeer
+    - BGPAdvertisement

@@ -13,9 +13,31 @@ spec:
 {% for range in metal_lb_ip_range %}
   - {{ range }}
 {% endfor %}

+{% if metal_lb_mode == "layer2" %}
 ---
 apiVersion: metallb.io/v1beta1
 kind: L2Advertisement
 metadata:
   name: default
   namespace: metallb-system
+{% endif %}
+{% if metal_lb_mode == "bgp" %}
+---
+apiVersion: metallb.io/v1beta2
+kind: BGPPeer
+metadata:
+  name: default
+  namespace: metallb-system
+spec:
+  myASN: {{ metal_lb_bgp_my_asn }}
+  peerASN: {{ metal_lb_bgp_peer_asn }}
+  peerAddress: {{ metal_lb_bgp_peer_address }}
+
+---
+apiVersion: metallb.io/v1beta1
+kind: BGPAdvertisement
+metadata:
+  name: default
+  namespace: metallb-system
+{% endif %}
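Rendered with the defaults from this range (`metal_lb_mode: layer2`, the sample IP range), the template produces roughly the manifest below. A hedged sketch: the IPAddressPool header and pool name are assumptions, since the top of the template sits outside this hunk:

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.30.80-192.168.30.90
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system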

roles/lxc/handlers/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
+---
+- name: reboot server
+  become: true
+  reboot:

roles/lxc/tasks/main.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
+---
+- name: Check for rc.local file
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Create rc.local if needed
+  lineinfile:
+    path: /etc/rc.local
+    line: "#!/bin/sh -e"
+    create: true
+    insertbefore: BOF
+    mode: "u=rwx,g=rx,o=rx"
+  when: not rcfile.stat.exists
+
+- name: Write rc.local file
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    state: present
+  notify: reboot server

roles/proxmox_lxc/handlers/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+- name: reboot containers
+  command:
+    "pct reboot {{ item }}"
+  loop: "{{ proxmox_lxc_filtered_ids }}"

roles/proxmox_lxc/tasks/main.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
+---
+- name: check for container files that exist on this host
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
+  loop: "{{ proxmox_lxc_ct_ids }}"
+  register: stat_results
+
+- name: filter out files that do not exist
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
+
+# used for the reboot handler
+- name: get container ids from filtered files
+  set_fact:
+    proxmox_lxc_filtered_ids:
+      '{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
+
+# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
+- name: Ensure lxc config has the right apparmor profile
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.apparmor.profile"
+    line: "lxc.apparmor.profile: unconfined"
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right cgroup
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cgroup.devices.allow"
+    line: "lxc.cgroup.devices.allow: a"
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right cap drop
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cap.drop"
+    line: "lxc.cap.drop: "
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right mounts
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.mount.auto"
+    line: 'lxc.mount.auto: "proc:rw sys:rw"'
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
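After these tasks run, each matched container config (for example `/etc/pve/lxc/200.conf`) contains the four lines the `lineinfile` tasks enforce; a sketch of just the managed lines:

lxc.apparmor.profile: unconfined
lxc.cgroup.devices.allow: a
lxc.cap.drop: 
lxc.mount.auto: "proc:rw sys:rw"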
@@ -54,3 +54,31 @@
   file:
     path: /tmp/k3s
     state: absent

+- name: Check if rc.local exists
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Remove rc.local modifications for proxmox lxc containers
+  become: true
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    create: false
+    state: absent
+  when: proxmox_lxc_configure and rclocal.stat.exists
+
+- name: Check rc.local for cleanup
+  become: true
+  slurp:
+    src: /etc/rc.local
+  register: rcslurp
+  when: proxmox_lxc_configure and rclocal.stat.exists
+
+- name: Cleanup rc.local if we only have a Shebang line
+  become: true
+  file:
+    path: /etc/rc.local
+    state: absent
+  when: proxmox_lxc_configure and rclocal.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1


roles/reset_proxmox_lxc/handlers/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+- name: reboot containers
+  command:
+    "pct reboot {{ item }}"
+  loop: "{{ proxmox_lxc_filtered_ids }}"

roles/reset_proxmox_lxc/tasks/main.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
+---
+- name: check for container files that exist on this host
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
+  loop: "{{ proxmox_lxc_ct_ids }}"
+  register: stat_results
+
+- name: filter out files that do not exist
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
+
+# used for the reboot handler
+- name: get container ids from filtered files
+  set_fact:
+    proxmox_lxc_filtered_ids:
+      '{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
+
+- name: Remove LXC apparmor profile
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.apparmor.profile"
+    line: "lxc.apparmor.profile: unconfined"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cgroups
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cgroup.devices.allow"
+    line: "lxc.cgroup.devices.allow: a"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cap drop
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cap.drop"
+    line: "lxc.cap.drop: "
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc mounts
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.mount.auto"
+    line: 'lxc.mount.auto: "proc:rw sys:rw"'
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers

site.yml (21 changes)
@@ -1,24 +1,37 @@
 ---

+- hosts: proxmox
+  gather_facts: true
+  become: yes
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: proxmox_lxc
+      when: proxmox_lxc_configure
+
 - hosts: k3s_cluster
   gather_facts: yes
-  become: yes
   roles:
+    - role: lxc
+      become: true
+      when: proxmox_lxc_configure
     - role: prereq
+      become: true
     - role: download
+      become: true
     - role: raspberrypi
+      become: true

 - hosts: master
-  become: yes
   roles:
     - role: k3s/master
+      become: true

 - hosts: node
-  become: yes
   roles:
     - role: k3s/node
+      become: true

 - hosts: master
-  become: yes
   roles:
     - role: k3s/post
+      become: true


templates/rc.local.j2 (new file, 8 lines)
@@ -0,0 +1,8 @@
+# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
+# see: https://github.com/kubernetes-sigs/kind/issues/662
+if [ ! -e /dev/kmsg ]; then
+  ln -s /dev/console /dev/kmsg
+fi
+
+# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c
+mount --make-rshared /
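Combined with the `lxc` role above, which prepends a shebang and wraps this template in a managed block, the assembled `/etc/rc.local` on a container would look roughly like this (the marker comments are Ansible's blockinfile defaults):

#!/bin/sh -e
# BEGIN ANSIBLE MANAGED BLOCK
# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
# see: https://github.com/kubernetes-sigs/kind/issues/662
if [ ! -e /dev/kmsg ]; then
  ln -s /dev/console /dev/kmsg
fi

# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c
mount --make-rshared /
# END ANSIBLE MANAGED BLOCK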