Merge branch 'master' into k3s-1-26

Techno Tim, 2023-02-06 22:27:41 -06:00, committed by GitHub
26 changed files with 159 additions and 1894 deletions

.gitignore (vendored)

@@ -1,2 +1,3 @@
.env/
*.log
ansible.cfg

README.md

@@ -67,6 +67,8 @@ node
If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).
Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
This requires at least k3s version `1.19.1`; however, the version is configurable via the `k3s_version` variable.
If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.
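For illustration only (the addresses are placeholders, not part of this commit), a three-master HA inventory looks something like:

[master]
192.168.30.38
192.168.30.39
192.168.30.40

[node]
192.168.30.41

[k3s_cluster:children]
master
node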

ansible.cfg (deleted)

@@ -1,23 +0,0 @@
[defaults]
nocows = True
roles_path = ./roles
inventory = ./hosts.ini
stdout_callback = yaml
remote_tmp = $HOME/.ansible/tmp
local_tmp = $HOME/.ansible/tmp
timeout = 60
host_key_checking = False
deprecation_warnings = False
callbacks_enabled = profile_tasks
log_path = ./ansible.log
[privilege_escalation]
become = True
[ssh_connection]
scp_if_ssh = smart
retries = 3
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s
pipelining = True
control_path = %(directory)s/%%h-%%r

ansible.example.cfg (new file)

@@ -0,0 +1,2 @@
[defaults]
inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file

deploy.sh

@@ -1,3 +1,3 @@
#!/bin/bash
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
ansible-playbook site.yml

inventory/sample/group_vars/all.yml

@@ -49,3 +49,24 @@ metal_lb_controller_tag_version: "v0.13.7"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# Only enable this if your nodes are Proxmox LXC containers; make sure to configure your Proxmox hosts
# in your hosts.ini file.
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged and must not have nesting set to true.
# Please note this disables most of the security of LXC containers, the trade-off being that LXC
# containers are significantly more resource-efficient than full VMs.
# Mixing and matching VMs and LXC containers is not supported; YMMV if you want to do this.
# I would only really recommend this if you have particularly low-powered Proxmox nodes where the
# overhead of VMs would consume a significant portion of your available resources.
proxmox_lxc_configure: false
# The user you would use to SSH into the host; for example, if you run `ssh some-user@my-proxmox-host`,
# set this value to some-user
proxmox_lxc_ssh_user: root
# The unique Proxmox IDs of all the containers in the cluster, both worker and master nodes
proxmox_lxc_ct_ids:
- 200
- 201
- 202
- 203
- 204
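For reference, with MetalLB v0.13.x the `metal_lb_ip_range` above ends up in an IPAddressPool custom resource rendered from metallb.crs.j2; a sketch of the rendered manifest, with the resource names assumed for illustration:

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool          # name assumed for illustration
  namespace: metallb-system
spec:
  addresses:
    - 192.168.30.80-192.168.30.90
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2                  # name assumed for illustration
  namespace: metallb-system
spec:
  ipAddressPools:
    - first-pool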

inventory/sample/hosts.ini

@@ -7,6 +7,11 @@
192.168.30.41
192.168.30.42
# only required if proxmox_lxc_configure: true
# must contain all proxmox instances that have a master or worker node
# [proxmox]
# 192.168.30.43
[k3s_cluster:children]
master
node

reboot.sh

@@ -1,3 +1,3 @@
#!/bin/bash
ansible-playbook reboot.yml -i inventory/my-cluster/hosts.ini
ansible-playbook reboot.yml

reboot.yml

@@ -2,8 +2,8 @@
- name: Reboot k3s_cluster
hosts: k3s_cluster
gather_facts: yes
become: yes
tasks:
- name: Reboot the nodes (and wait up to 5 mins max)
become: true
reboot:
reboot_timeout: 300

requirements.txt

@@ -4,15 +4,16 @@
#
# pip-compile requirements.in
#
ansible-compat==2.2.4
ansible-compat==3.0.1
# via
# ansible-lint
# molecule
ansible-core==2.14.1
ansible-core==2.14.2
# via
# -r requirements.in
# ansible-compat
# ansible-lint
ansible-lint==6.8.6
ansible-lint==6.12.0
# via -r requirements.in
arrow==1.2.3
# via jinja2-time
@@ -187,7 +188,7 @@ six==1.16.0
# google-auth
# kubernetes
# python-dateutil
subprocess-tee==0.3.5
subprocess-tee==0.4.1
# via ansible-compat
text-unidecode==1.3
# via python-slugify

reset.sh

@@ -1,3 +1,3 @@
#!/bin/bash
ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
ansible-playbook reset.yml

reset.yml

@@ -2,12 +2,14 @@
- hosts: k3s_cluster
gather_facts: yes
become: yes
roles:
- role: reset
become: true
- role: raspberrypi
become: true
vars: {state: absent}
post_tasks:
- name: Reboot and wait for node to come back up
become: true
reboot:
reboot_timeout: 3600

roles/k3s/master/defaults/main.yml

@@ -1,5 +1,4 @@
---
ansible_user: root
server_init_args: >-
{% if groups['master'] | length > 1 %}
{% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}

roles/k3s/master/tasks/main.yml

@@ -97,24 +97,24 @@
- name: Create directory .kube
file:
path: ~{{ ansible_user }}/.kube
path: "{{ ansible_user_dir }}/.kube"
state: directory
owner: "{{ ansible_user }}"
owner: "{{ ansible_user_id }}"
mode: "u=rwx,g=rx,o="
- name: Copy config file to user home directory
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: ~{{ ansible_user }}/.kube/config
dest: "{{ ansible_user_dir }}/.kube/config"
remote_src: yes
owner: "{{ ansible_user }}"
owner: "{{ ansible_user_id }}"
mode: "u=rw,g=,o="
- name: Configure kubectl cluster to {{ endpoint_url }}
command: >-
k3s kubectl config set-cluster default
--server={{ endpoint_url }}
--kubeconfig ~{{ ansible_user }}/.kube/config
--kubeconfig {{ ansible_user_dir }}/.kube/config
changed_when: true
vars:
endpoint_url: >-
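(The move from `~{{ ansible_user }}` to the gathered facts `ansible_user_dir` and `ansible_user_id` means these paths resolve for the user the tasks actually run as, which also behaves correctly under privilege escalation.)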

roles/k3s/master/tasks/metallb.yml

@@ -8,20 +8,23 @@
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy metallb namespace to first master
template:
src: "metallb.namespace.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy metallb manifest to first master
template:
src: "metallb.crds.j2"
- name: Download metallb manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-native.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Set image versions for metallb manifest
ansible.builtin.replace:
path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
regexp: "{{ item.change | ansible.builtin.regex_escape }}"
replace: "{{ item.to }}"
with_items:
- change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
loop_control:
label: "{{ item.change }} => {{ item.to }}"
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
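(The `ansible.builtin.regex_escape` filter makes the image tag match literally; otherwise the dots in a tag like v0.13.7 would be treated as regex wildcards.)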

roles/k3s/master/tasks/vip.yml

@@ -8,9 +8,9 @@
mode: 0644
when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
- name: Copy vip rbac manifest to first master
template:
src: "vip.rbac.yaml.j2"
- name: Download vip rbac manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
owner: root
group: root

metallb.crds.j2 (deleted): file diff suppressed because it is too large

metallb.namespace.j2 (deleted)

@@ -1,6 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
labels:
app: metallb

vip.rbac.yaml.j2 (deleted)

@@ -1,32 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-vip
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: system:kube-vip-role
rules:
- apiGroups: [""]
resources: ["services", "services/status", "nodes", "endpoints"]
verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-vip-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-vip-role
subjects:
- kind: ServiceAccount
name: kube-vip
namespace: kube-system

roles/k3s/post/tasks/metallb.yml

@@ -3,7 +3,7 @@
file:
path: /tmp/k3s
state: directory
owner: "{{ ansible_user }}"
owner: "{{ ansible_user_id }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true
@@ -12,7 +12,7 @@
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
owner: "{{ ansible_user }}"
owner: "{{ ansible_user_id }}"
mode: 0755
with_items: "{{ groups['master'] }}"
run_once: true

roles/lxc/handlers/main.yml (new file)

@@ -0,0 +1,3 @@
---
- name: reboot server
reboot:
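(As with any Ansible handler, this runs at most once at the end of the play, however many tasks notify it.)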

roles/lxc/tasks/main.yml (new file)

@@ -0,0 +1,7 @@
---
- name: configure rc.local for proxmox lxc containers
copy:
src: "{{ playbook_dir }}/scripts/rc.local"
dest: "/etc/rc.local"
mode: "u=rwx,g=rx,o=rx"
notify: reboot server

roles/proxmox_lxc/handlers/main.yml (new file)

@@ -0,0 +1,5 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"

roles/proxmox_lxc/tasks/main.yml (new file)

@@ -0,0 +1,50 @@
---
- name: check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
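# Illustration with hypothetical values: given proxmox_lxc_ct_ids of [200, 999] and only
# /etc/pve/lxc/200.conf present on this host, the two facts above resolve to:
#   proxmox_lxc_filtered_files: ["/etc/pve/lxc/200.conf"]
#   proxmox_lxc_filtered_ids:   ["200"]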
# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.apparmor.profile"
line: "lxc.apparmor.profile: unconfined"
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right cgroup
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cgroup.devices.allow"
line: "lxc.cgroup.devices.allow: a"
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right cap drop
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cap.drop"
line: "lxc.cap.drop: "
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right mounts
lineinfile:
dest: "{{ item }}"
regexp: "^lxc.mount.auto"
line: 'lxc.mount.auto: "proc:rw sys:rw"'
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers

scripts/rc.local (new file)

@@ -0,0 +1,10 @@
#!/bin/sh -e
# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not present in LXC, so we can just use /dev/console instead
# see: https://github.com/kubernetes-sigs/kind/issues/662
if [ ! -e /dev/kmsg ]; then
ln -s /dev/console /dev/kmsg
fi
# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c
mount --make-rshared /
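(Note: /etc/rc.local only runs at boot on systems that honor it, e.g. via systemd's rc-local unit, which is why the lxc role installs it with execute permissions and triggers a reboot.)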

site.yml

@@ -1,24 +1,36 @@
---
- hosts: proxmox
gather_facts: true
become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}"
roles:
- role: proxmox_lxc
when: proxmox_lxc_configure
- hosts: k3s_cluster
gather_facts: yes
become: yes
roles:
- role: lxc
when: proxmox_lxc_configure
- role: prereq
become: true
- role: download
become: true
- role: raspberrypi
become: true
- hosts: master
become: yes
roles:
- role: k3s/master
become: true
- hosts: node
become: yes
roles:
- role: k3s/node
become: true
- hosts: master
become: yes
roles:
- role: k3s/post
become: true