Change to FQCN with ansible-lint fixer (#553)

* Change to FQCN with ansible-lint fixer

Since ansible-base 2.10 (later renamed ansible-core), fully qualified collection names (FQCNs) are the recommended way to reference modules and plugins.

Updated .ansible-lint to use the production profile and removed fqcn from the skip_list.
Updated .yamllint with the rules this requires (see the sketch below).

Ran ansible-lint --fix=all, then manually applied some minor changes.

* Changed octal value in molecule/ipv6/prepare.yml
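
For reference, the linter configuration changes described above would look roughly like the following sketch. It is reconstructed from the commit description, not copied from the actual files in this commit:

  # .ansible-lint (sketch)
  profile: production   # stricter rule set that includes the fqcn checks
  skip_list: []         # fqcn removed, so short module names are reported again

  # .yamllint (sketch)
  rules:
    line-length:
      max: 120                      # assumed limit; long manifest URLs below still carry disable comments
    octal-values:
      forbid-implicit-octal: true   # requires file modes like 0644 to be quoted
      forbid-explicit-octal: true
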
Richard Holmboe authored on 2024-08-13 05:59:59 +02:00, committed by GitHub
parent 635f0b21b3, commit b077a49e1f
49 changed files with 317 additions and 317 deletions

@@ -23,6 +23,6 @@
   ansible.builtin.template:
     src: content.j2
     dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
-    mode: 0644
+    mode: "0644"
   vars:
     content: "{{ k3s_init_log.stdout }}"
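
The mode changes in this and the following hunks, like the molecule/ipv6/prepare.yml fix called out in the commit message, guard against a classic YAML pitfall: an unquoted mode is parsed as an integer before Ansible ever sees it. A minimal illustration:

  mode: 0644     # YAML 1.1 octal literal -> integer 420; happens to map back to 0644, but only by luck
  mode: 644      # decimal integer 644 -> permissions 1204 in octal, almost certainly not intended
  mode: "0644"   # quoted string reaches the module unchanged; this is what the fixer enforces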

@@ -1,17 +1,16 @@
 ---
 - name: Create k3s.service.d directory
-  file:
-    path: '{{ systemd_dir }}/k3s.service.d'
+  ansible.builtin.file:
+    path: "{{ systemd_dir }}/k3s.service.d"
     state: directory
     owner: root
     group: root
-    mode: '0755'
+    mode: "0755"
 - name: Copy K3s http_proxy conf file
-  template:
-    src: "http_proxy.conf.j2"
+  ansible.builtin.template:
+    src: http_proxy.conf.j2
     dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
     owner: root
     group: root
-    mode: '0755'
+    mode: "0755"

@@ -1,27 +1,27 @@
 ---
 - name: Create manifests directory on first master
-  file:
+  ansible.builtin.file:
     path: /var/lib/rancher/k3s/server/manifests
     state: directory
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 - name: Download vip cloud provider manifest to first master
   ansible.builtin.get_url:
-    url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml" # noqa yaml[line-length]
-    dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
+    url: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml # noqa yaml[line-length]
+    dest: /var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 - name: Copy kubevip configMap manifest to first master
-  template:
-    src: "kubevip.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
+  ansible.builtin.template:
+    src: kubevip.yaml.j2
+    dest: /var/lib/rancher/k3s/server/manifests/kubevip.yaml
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

@@ -1,133 +1,129 @@
 ---
 - name: Stop k3s-init
-  systemd:
+  ansible.builtin.systemd:
     name: k3s-init
     state: stopped
   failed_when: false
 # k3s-init won't work if the port is already in use
 - name: Stop k3s
-  systemd:
+  ansible.builtin.systemd:
     name: k3s
     state: stopped
   failed_when: false
 - name: Clean previous runs of k3s-init # noqa command-instead-of-module
   # The systemd module does not support "reset-failed", so we need to resort to command.
-  command: systemctl reset-failed k3s-init
+  ansible.builtin.command: systemctl reset-failed k3s-init
   failed_when: false
   changed_when: false
 - name: Deploy K3s http_proxy conf
-  include_tasks: http_proxy.yml
+  ansible.builtin.include_tasks: http_proxy.yml
   when: proxy_env is defined
 - name: Deploy vip manifest
-  include_tasks: vip.yml
+  ansible.builtin.include_tasks: vip.yml
 - name: Deploy metallb manifest
-  include_tasks: metallb.yml
+  ansible.builtin.include_tasks: metallb.yml
   tags: metallb
   when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
 - name: Deploy kube-vip manifest
-  include_tasks: kube-vip.yml
+  ansible.builtin.include_tasks: kube-vip.yml
   tags: kubevip
   when: kube_vip_lb_ip_range is defined
 - name: Init cluster inside the transient k3s-init service
-  command:
-    cmd: "systemd-run -p RestartSec=2 \
-      -p Restart=on-failure \
-      --unit=k3s-init \
-      k3s server {{ server_init_args }}"
+  ansible.builtin.command:
+    cmd: systemd-run -p RestartSec=2 -p Restart=on-failure --unit=k3s-init k3s server {{ server_init_args }}
     creates: "{{ systemd_dir }}/k3s-init.service"
 - name: Verification
   when: not ansible_check_mode
   block:
     - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
-      command:
+      ansible.builtin.command:
         cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
       register: nodes
       until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
       retries: "{{ retry_count | default(20) }}"
       delay: 10
       changed_when: false
   always:
     - name: Save logs of k3s-init.service
-      include_tasks: fetch_k3s_init_logs.yml
+      ansible.builtin.include_tasks: fetch_k3s_init_logs.yml
       when: log_destination
       vars:
         log_destination: >-
           {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
     - name: Kill the temporary service used for initialization
-      systemd:
+      ansible.builtin.systemd:
         name: k3s-init
         state: stopped
       failed_when: false
 - name: Copy K3s service file
   register: k3s_service
-  template:
-    src: "k3s.service.j2"
+  ansible.builtin.template:
+    src: k3s.service.j2
     dest: "{{ systemd_dir }}/k3s.service"
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
 - name: Enable and check K3s service
-  systemd:
+  ansible.builtin.systemd:
     name: k3s
     daemon_reload: true
     state: restarted
     enabled: true
 - name: Wait for node-token
-  wait_for:
+  ansible.builtin.wait_for:
     path: /var/lib/rancher/k3s/server/node-token
 - name: Register node-token file access mode
-  stat:
+  ansible.builtin.stat:
     path: /var/lib/rancher/k3s/server
   register: p
 - name: Change file access node-token
-  file:
+  ansible.builtin.file:
     path: /var/lib/rancher/k3s/server
-    mode: "g+rx,o+rx"
+    mode: g+rx,o+rx
 - name: Read node-token from master
-  slurp:
+  ansible.builtin.slurp:
     src: /var/lib/rancher/k3s/server/node-token
   register: node_token
 - name: Store Master node-token
-  set_fact:
+  ansible.builtin.set_fact:
     token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
 - name: Restore node-token file access
-  file:
+  ansible.builtin.file:
     path: /var/lib/rancher/k3s/server
     mode: "{{ p.stat.mode }}"
 - name: Create directory .kube
-  file:
+  ansible.builtin.file:
     path: "{{ ansible_user_dir }}/.kube"
     state: directory
     owner: "{{ ansible_user_id }}"
-    mode: "u=rwx,g=rx,o="
+    mode: u=rwx,g=rx,o=
 - name: Copy config file to user home directory
-  copy:
+  ansible.builtin.copy:
     src: /etc/rancher/k3s/k3s.yaml
     dest: "{{ ansible_user_dir }}/.kube/config"
     remote_src: true
     owner: "{{ ansible_user_id }}"
-    mode: "u=rw,g=,o="
+    mode: u=rw,g=,o=
 - name: Configure kubectl cluster to {{ endpoint_url }}
-  command: >-
+  ansible.builtin.command: >-
     k3s kubectl config set-cluster default
     --server={{ endpoint_url }}
     --kubeconfig {{ ansible_user_dir }}/.kube/config
@@ -135,39 +131,39 @@
   vars:
     endpoint_url: >-
       https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
   # Deactivated linter rules:
   #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
   #     would be undefined. This will not be the case during playbook execution.
   # noqa jinja[invalid]
 - name: Create kubectl symlink
-  file:
+  ansible.builtin.file:
     src: /usr/local/bin/k3s
     dest: /usr/local/bin/kubectl
     state: link
   when: k3s_create_kubectl_symlink | default(true) | bool
 - name: Create crictl symlink
-  file:
+  ansible.builtin.file:
     src: /usr/local/bin/k3s
     dest: /usr/local/bin/crictl
     state: link
   when: k3s_create_crictl_symlink | default(true) | bool
 - name: Get contents of manifests folder
-  find:
+  ansible.builtin.find:
     paths: /var/lib/rancher/k3s/server/manifests
     file_type: file
   register: k3s_server_manifests
 - name: Get sub dirs of manifests folder
-  find:
+  ansible.builtin.find:
     paths: /var/lib/rancher/k3s/server/manifests
     file_type: directory
   register: k3s_server_manifests_directories
 - name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
-  file:
+  ansible.builtin.file:
     path: "{{ item.path }}"
     state: absent
   with_items:
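
A side note on the endpoint_url expression retained above: the ansible.utils.ipwrap filter wraps IPv6 addresses in square brackets so they can be joined with a port, and passes IPv4 addresses and other values through unchanged. A minimal sketch (hypothetical task, assuming the ansible.utils collection is installed):

  - name: Show what ipwrap does to different endpoint values
    ansible.builtin.debug:
      msg: "https://{{ item | ansible.utils.ipwrap }}:6443"
    loop:
      - 192.168.1.10   # -> https://192.168.1.10:6443
      - 2001:db8::10   # -> https://[2001:db8::10]:6443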

@@ -1,30 +1,30 @@
 ---
 - name: Create manifests directory on first master
-  file:
+  ansible.builtin.file:
     path: /var/lib/rancher/k3s/server/manifests
     state: directory
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 - name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
   ansible.builtin.get_url:
-    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml" # noqa yaml[line-length]
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    url: https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml # noqa yaml[line-length]
+    dest: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 - name: Set image versions in manifest for metallb-{{ metal_lb_type }}
   ansible.builtin.replace:
-    path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    path: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
     regexp: "{{ item.change | ansible.builtin.regex_escape }}"
     replace: "{{ item.to }}"
   with_items:
-    - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
-      to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
+    - change: metallb/speaker:{{ metal_lb_controller_tag_version }}
+      to: metallb/speaker:{{ metal_lb_speaker_tag_version }}
   loop_control:
     label: "{{ item.change }} => {{ item.to }}"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

@@ -1,27 +1,27 @@
 ---
 - name: Create manifests directory on first master
-  file:
+  ansible.builtin.file:
     path: /var/lib/rancher/k3s/server/manifests
     state: directory
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 - name: Download vip rbac manifest to first master
   ansible.builtin.get_url:
-    url: "https://kube-vip.io/manifests/rbac.yaml"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
+    url: https://kube-vip.io/manifests/rbac.yaml
+    dest: /var/lib/rancher/k3s/server/manifests/vip-rbac.yaml
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
 - name: Copy vip manifest to first master
-  template:
-    src: "vip.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
+  ansible.builtin.template:
+    src: vip.yaml.j2
+    dest: /var/lib/rancher/k3s/server/manifests/vip.yaml
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']