forked from tim/k3s-ansible
Fix CI (#332)
* Update pre-commit actions

  This was done by running "pre-commit autoupdate --freeze".

* Remove pre-commit only dependencies from requirements.in

  Including them in the file would create the illusion that those were the
  versions actually used in CI, but they are not. The exact versions are
  determined by the pre-commit hooks which are pinned in
  .pre-commit-config.yaml.

* Ansible Lint: Fix role-name[path]
* Ansible Lint: Fix name[play]
* Ansible Lint: Fix key-order[task]
* Ansible Lint: Fix jinja[spacing]
* Ansible Lint: Fix no-free-form
* Ansible Lint: Fix var-naming[no-reserved]
* Ansible Lint: Fix yaml[comments]
* Ansible Lint: Fix yaml[line-length]
* Ansible Lint: Fix name[casing]
* Ansible Lint: Fix no-changed-when
* Ansible Lint: Fix fqcn[action]
* Ansible Lint: Fix args[module]
* Improve task naming
This commit is contained in:
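As context for the lint fixes listed above: the no-free-form and fqcn[action]
rules flag free-form tasks like the "systemctl reset-failed" call that appears
in the deleted lines below. A minimal sketch of the shape of such a fix (the
exact replacement made by this commit may differ):

# Before: free-form module call with a short module name.
- name: Clean previous runs of k3s-init
  command: systemctl reset-failed k3s-init
  changed_when: false

# After: fully qualified collection name and an explicit cmd argument.
- name: Clean previous runs of k3s-init
  ansible.builtin.command:
    cmd: systemctl reset-failed k3s-init
  changed_when: false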
@@ -1,16 +0,0 @@
---
# If you want to explicitly define an interface that ALL control nodes
# should use to propagate the VIP, define it here. Otherwise, kube-vip
# will determine the right interface automatically at runtime.
kube_vip_iface: null

server_init_args: >-
  {% if groups['master'] | length > 1 %}
  {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
  --cluster-init
  {% else %}
  --server https://{{ hostvars[groups['master'][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
  {% endif %}
  --token {{ k3s_token }}
  {% endif %}
  {{ extra_server_args | default('') }}
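A sketch of how server_init_args behaves under a hypothetical inventory
(three masters, k3s_node_ip 10.0.0.11 on the first): the ">-" block scalar
folds the Jinja output into a single argument string per host.

# First master:   --cluster-init --token <k3s_token> <extra_server_args>
# Other masters:  --server https://10.0.0.11:6443 --token <k3s_token> <extra_server_args>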
@@ -1,28 +0,0 @@
---
# Download logs of k3s-init.service from the nodes to localhost.
# Note that log_destination must be set.

- name: Fetch k3s-init.service logs
  ansible.builtin.command:
    cmd: journalctl --all --unit=k3s-init.service
  changed_when: false
  register: k3s_init_log

- name: Create {{ log_destination }}
  delegate_to: localhost
  run_once: true
  become: false
  ansible.builtin.file:
    path: "{{ log_destination }}"
    state: directory
    mode: "0755"

- name: Store logs to {{ log_destination }}
  delegate_to: localhost
  become: false
  ansible.builtin.template:
    src: content.j2
    dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
    mode: 0644
  vars:
    content: "{{ k3s_init_log.stdout }}"
@@ -1,159 +0,0 @@
---

- name: Clean previous runs of k3s-init
  systemd:
    name: k3s-init
    state: stopped
  failed_when: false

- name: Clean previous runs of k3s-init
  command: systemctl reset-failed k3s-init
  failed_when: false
  changed_when: false
  args:
    warn: false  # The ansible systemd module does not support reset-failed

- name: Deploy vip manifest
  include_tasks: vip.yml

- name: Deploy metallb manifest
  include_tasks: metallb.yml

- name: Init cluster inside the transient k3s-init service
  command:
    cmd: "systemd-run -p RestartSec=2 \
          -p Restart=on-failure \
          --unit=k3s-init \
          k3s server {{ server_init_args }}"
    creates: "{{ systemd_dir }}/k3s.service"

- name: Verification
  block:
    - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
      command:
        cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
      register: nodes
      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
      retries: "{{ retry_count | default(20) }}"
      delay: 10
      changed_when: false
  always:
    - name: Save logs of k3s-init.service
      include_tasks: fetch_k3s_init_logs.yml
      when: log_destination
      vars:
        log_destination: >-
          {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
    - name: Kill the temporary service used for initialization
      systemd:
        name: k3s-init
        state: stopped
      failed_when: false
      when: not ansible_check_mode

- name: Copy K3s service file
  register: k3s_service
  template:
    src: "k3s.service.j2"
    dest: "{{ systemd_dir }}/k3s.service"
    owner: root
    group: root
    mode: 0644

- name: Enable and check K3s service
  systemd:
    name: k3s
    daemon_reload: yes
    state: restarted
    enabled: yes

- name: Wait for node-token
  wait_for:
    path: /var/lib/rancher/k3s/server/node-token

- name: Register node-token file access mode
  stat:
    path: /var/lib/rancher/k3s/server
  register: p

- name: Change file access node-token
  file:
    path: /var/lib/rancher/k3s/server
    mode: "g+rx,o+rx"

- name: Read node-token from master
  slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: node_token

- name: Store Master node-token
  set_fact:
    token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"

- name: Restore node-token file access
  file:
    path: /var/lib/rancher/k3s/server
    mode: "{{ p.stat.mode }}"

- name: Create directory .kube
  file:
    path: "{{ ansible_user_dir }}/.kube"
    state: directory
    owner: "{{ ansible_user_id }}"
    mode: "u=rwx,g=rx,o="

- name: Copy config file to user home directory
  copy:
    src: /etc/rancher/k3s/k3s.yaml
    dest: "{{ ansible_user_dir }}/.kube/config"
    remote_src: yes
    owner: "{{ ansible_user_id }}"
    mode: "u=rw,g=,o="

- name: Configure kubectl cluster to {{ endpoint_url }}
  command: >-
    k3s kubectl config set-cluster default
      --server={{ endpoint_url }}
      --kubeconfig {{ ansible_user_dir }}/.kube/config
  changed_when: true
  vars:
    endpoint_url: >-
      https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
  # Deactivated linter rules:
  # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
  #   would be undefined. This will not be the case during playbook execution.
  # noqa jinja[invalid]

- name: Create kubectl symlink
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/kubectl
    state: link

- name: Create crictl symlink
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/crictl
    state: link

- name: Get contents of manifests folder
  find:
    paths: /var/lib/rancher/k3s/server/manifests
    file_type: file
  register: k3s_server_manifests

- name: Get sub dirs of manifests folder
  find:
    paths: /var/lib/rancher/k3s/server/manifests
    file_type: directory
  register: k3s_server_manifests_directories

- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
  file:
    path: "{{ item.path }}"
    state: absent
  with_items:
    - "{{ k3s_server_manifests.files }}"
    - "{{ k3s_server_manifests_directories.files }}"
  loop_control:
    label: "{{ item.path }}"
@@ -1,30 +0,0 @@
---
- name: Create manifests directory on first master
  file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml"  # noqa yaml[line-length]
    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
  ansible.builtin.replace:
    path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
    replace: "{{ item.to }}"
  with_items:
    - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
      to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
  loop_control:
    label: "{{ item.change }} => {{ item.to }}"
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
@@ -1,27 +0,0 @@
---
- name: Create manifests directory on first master
  file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Download vip rbac manifest to first master
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Copy vip manifest to first master
  template:
    src: "vip.yaml.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
    owner: root
    group: root
    mode: 0644
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
@@ -1,5 +0,0 @@
{#
This is a really simple template that just outputs the
value of the "content" variable.
#}
{{ content }}
@@ -1,24 +0,0 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server {{ extra_server_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target
@@ -1,79 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-vip-ds
  namespace: kube-system
spec:
  selector:
    matchLabels:
      name: kube-vip-ds
  template:
    metadata:
      labels:
        name: kube-vip-ds
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: Exists
              - matchExpressions:
                  - key: node-role.kubernetes.io/control-plane
                    operator: Exists
      containers:
        - args:
            - manager
          env:
            - name: vip_arp
              value: "true"
            - name: port
              value: "6443"
{% if kube_vip_iface %}
            - name: vip_interface
              value: {{ kube_vip_iface }}
{% endif %}
            - name: vip_cidr
              value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
            - name: cp_enable
              value: "true"
            - name: cp_namespace
              value: kube-system
            - name: vip_ddns
              value: "false"
            - name: svc_enable
              value: "false"
            - name: vip_leaderelection
              value: "true"
            - name: vip_leaseduration
              value: "15"
            - name: vip_renewdeadline
              value: "10"
            - name: vip_retryperiod
              value: "2"
            - name: address
              value: {{ apiserver_endpoint }}
          image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }}
          imagePullPolicy: Always
          name: kube-vip
          resources: {}
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
                - NET_RAW
                - SYS_TIME
      hostNetwork: true
      serviceAccountName: kube-vip
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
  updateStrategy: {}
status:
  currentNumberScheduled: 0
  desiredNumberScheduled: 0
  numberMisscheduled: 0
  numberReady: 0
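As a rough sketch, assuming a hypothetical apiserver_endpoint of
192.168.30.222: ipsubnet turns the bare address into 192.168.30.222/32 and
ipaddr('prefix') extracts the prefix length, so the templated env entries
above would render to something like:

# Hypothetical rendered output, not part of the commit.
- name: vip_cidr
  value: "32"
- name: address
  value: 192.168.30.222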
@@ -1,16 +0,0 @@
---

- name: Copy K3s service file
  template:
    src: "k3s.service.j2"
    dest: "{{ systemd_dir }}/k3s-node.service"
    owner: root
    group: root
    mode: 0755

- name: Enable and check K3s service
  systemd:
    name: k3s-node
    daemon_reload: yes
    state: restarted
    enabled: yes
@@ -1,24 +0,0 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target
@@ -1,3 +0,0 @@
---
# Timeout to wait for MetalLB services to come up
metal_lb_available_timeout: 120s
@@ -1,8 +0,0 @@
---
- name: Deploy metallb pool
  include_tasks: metallb.yml

- name: Remove tmp directory used for manifests
  file:
    path: /tmp/k3s
    state: absent
@@ -1,101 +0,0 @@
---
- name: Create manifests directory for temp configuration
  file:
    path: /tmp/k3s
    state: directory
    owner: "{{ ansible_user_id }}"
    mode: 0755
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Copy metallb CRs manifest to first master
  template:
    src: "metallb.crs.j2"
    dest: "/tmp/k3s/metallb-crs.yaml"
    owner: "{{ ansible_user_id }}"
    mode: 0755
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Test metallb-system namespace
  command: >-
    k3s kubectl -n metallb-system
  changed_when: false
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Wait for MetalLB resources
  command: >-
    k3s kubectl wait {{ item.resource }}
      --namespace='metallb-system'
      {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
      {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
      {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
      --timeout='{{ metal_lb_available_timeout }}'
  changed_when: false
  run_once: true
  with_items:
    - description: controller
      resource: deployment
      name: controller
      condition: --for condition=Available=True
    - description: webhook service
      resource: pod
      selector: component=controller
      condition: --for=jsonpath='{.status.phase}'=Running
    - description: pods in replica sets
      resource: pod
      selector: component=controller,app=metallb
      condition: --for condition=Ready
    - description: ready replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.readyReplicas}'=1
    - description: fully labeled replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
    - description: available replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.availableReplicas}'=1
  loop_control:
    label: "{{ item.description }}"

- name: Test metallb-system webhook-service endpoint
  command: >-
    k3s kubectl -n metallb-system get endpoints webhook-service
  changed_when: false
  with_items: "{{ groups['master'] }}"
  run_once: true

- name: Apply metallb CRs
  command: >-
    k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
      --timeout='{{ metal_lb_available_timeout }}'
  register: this
  changed_when: false
  run_once: true
  until: this.rc == 0
  retries: 5

- name: Test metallb-system resources for Layer 2 configuration
  command: >-
    k3s kubectl -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "layer2"
  with_items:
    - IPAddressPool
    - L2Advertisement

- name: Test metallb-system resources for BGP configuration
  command: >-
    k3s kubectl -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "bgp"
  with_items:
    - IPAddressPool
    - BGPPeer
    - BGPAdvertisement
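For illustration, the first with_items entry of "Wait for MetalLB resources"
above (description: controller) would render the folded command roughly as:

# Hypothetical rendered command, assuming the default 120s timeout.
# k3s kubectl wait deployment --namespace='metallb-system' controller --for condition=Available=True --timeout='120s'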
@@ -1,43 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
{% if metal_lb_ip_range is string %}
{# metal_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set metal_lb_ip_range = [metal_lb_ip_range] %}
{% endif %}
{% for range in metal_lb_ip_range %}
    - {{ range }}
{% endfor %}

{% if metal_lb_mode == "layer2" %}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
{% endif %}
{% if metal_lb_mode == "bgp" %}
---
apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
  name: default
  namespace: metallb-system
spec:
  myASN: {{ metal_lb_bgp_my_asn }}
  peerASN: {{ metal_lb_bgp_peer_asn }}
  peerAddress: {{ metal_lb_bgp_peer_address }}

---
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
  name: default
  namespace: metallb-system
{% endif %}
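A sketch of what metallb.crs.j2 renders to, assuming hypothetical values
metal_lb_mode: layer2 and metal_lb_ip_range: 192.168.30.80-192.168.30.90:

# Hypothetical rendered output, not part of the commit.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.30.80-192.168.30.90
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system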