From e98e3ee77c7a07ed5ff1ab4588f394462c70b20f Mon Sep 17 00:00:00 2001 From: irish1986 Date: Mon, 2 Jan 2023 00:04:22 -0500 Subject: [PATCH] Split manifest into separate task for ease of use (#191) --- roles/k3s/master/tasks/main.yml | 48 ++-------------- roles/k3s/master/tasks/metallb.yml | 27 +++++++++ roles/k3s/master/tasks/vip.yml | 27 +++++++++ roles/k3s/post/tasks/main.yml | 90 +----------------------------- roles/k3s/post/tasks/metallb.yml | 89 +++++++++++++++++++++++++++++ 5 files changed, 149 insertions(+), 132 deletions(-) create mode 100644 roles/k3s/master/tasks/metallb.yml create mode 100644 roles/k3s/master/tasks/vip.yml create mode 100644 roles/k3s/post/tasks/metallb.yml diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index fe30aaa..053ff7b 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -13,51 +13,11 @@ args: warn: false # The ansible systemd module does not support reset-failed -- name: Create manifests directory on first master - file: - path: /var/lib/rancher/k3s/server/manifests - state: directory - owner: root - group: root - mode: 0644 - when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] +- name: Deploy vip manifest + include_tasks: vip.yml -- name: Copy vip rbac manifest to first master - template: - src: "vip.rbac.yaml.j2" - dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml" - owner: root - group: root - mode: 0644 - when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] - -- name: Copy vip manifest to first master - template: - src: "vip.yaml.j2" - dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml" - owner: root - group: root - mode: 0644 - when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] - -# these will be copied and installed now, then tested later and apply config -- name: Copy metallb namespace to first master - template: - src: "metallb.namespace.j2" - dest: 
"/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy metallb namespace to first master
-  template:
-    src: "metallb.crds.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+- name: Deploy metallb manifest
+  include_tasks: metallb.yml
 
 - name: Init cluster inside the transient k3s-init service
   command:
diff --git a/roles/k3s/master/tasks/metallb.yml b/roles/k3s/master/tasks/metallb.yml
new file mode 100644
index 0000000..995fce7
--- /dev/null
+++ b/roles/k3s/master/tasks/metallb.yml
@@ -0,0 +1,27 @@
+---
+- name: Create manifests directory on first master
+  file:
+    path: /var/lib/rancher/k3s/server/manifests
+    state: directory
+    owner: root
+    group: root
+    mode: 0755
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+
+- name: Copy metallb namespace to first master
+  template:
+    src: "metallb.namespace.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
+
+- name: Copy metallb manifest to first master
+  template:
+    src: "metallb.crds.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
diff --git a/roles/k3s/master/tasks/vip.yml b/roles/k3s/master/tasks/vip.yml
new file mode 100644
index 0000000..ae55342
--- /dev/null
+++ b/roles/k3s/master/tasks/vip.yml
@@ -0,0 +1,27 @@
+---
+- name: Create manifests directory on first master
+  file:
+    path: /var/lib/rancher/k3s/server/manifests
+    state: directory
+    owner: root
+    group: root
+    mode: 0755
+  when: ansible_hostname == 
hostvars[groups['master'][0]]['ansible_hostname'] + +- name: Copy vip rbac manifest to first master + template: + src: "vip.rbac.yaml.j2" + dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml" + owner: root + group: root + mode: 0644 + when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] + +- name: Copy vip manifest to first master + template: + src: "vip.yaml.j2" + dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml" + owner: root + group: root + mode: 0644 + when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] diff --git a/roles/k3s/post/tasks/main.yml b/roles/k3s/post/tasks/main.yml index a838885..84a79db 100644 --- a/roles/k3s/post/tasks/main.yml +++ b/roles/k3s/post/tasks/main.yml @@ -1,92 +1,6 @@ --- -- name: Create manifests directory for temp configuration - file: - path: /tmp/k3s - state: directory - owner: "{{ ansible_user }}" - mode: 0755 - with_items: "{{ groups['master'] }}" - run_once: true - -- name: Copy metallb CRs manifest to first master - template: - src: "metallb.crs.j2" - dest: "/tmp/k3s/metallb-crs.yaml" - owner: "{{ ansible_user }}" - mode: 0755 - with_items: "{{ groups['master'] }}" - run_once: true - -- name: Test metallb-system namespace - command: >- - k3s kubectl -n metallb-system - changed_when: false - with_items: "{{ groups['master'] }}" - run_once: true - -- name: Wait for MetalLB resources - command: >- - k3s kubectl wait {{ item.resource }} - --namespace='metallb-system' - {% if item.name | default(False) -%}{{ item.name }}{%- endif %} - {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %} - {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %} - --timeout='{{ metal_lb_available_timeout }}' - changed_when: false - run_once: true - with_items: - - description: controller - resource: deployment - name: controller - condition: --for condition=Available=True - - description: webhook service - resource: pod - selector: 
component=controller - condition: --for=jsonpath='{.status.phase}'=Running - - description: pods in replica sets - resource: pod - selector: component=controller,app=metallb - condition: --for condition=Ready - - description: ready replicas of controller - resource: replicaset - selector: component=controller,app=metallb - condition: --for=jsonpath='{.status.readyReplicas}'=1 - - description: fully labeled replicas of controller - resource: replicaset - selector: component=controller,app=metallb - condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1 - - description: available replicas of controller - resource: replicaset - selector: component=controller,app=metallb - condition: --for=jsonpath='{.status.availableReplicas}'=1 - loop_control: - label: "{{ item.description }}" - -- name: Test metallb-system webhook-service endpoint - command: >- - k3s kubectl -n metallb-system get endpoints webhook-service - changed_when: false - with_items: "{{ groups['master'] }}" - run_once: true - -- name: Apply metallb CRs - command: >- - k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml - --timeout='{{ metal_lb_available_timeout }}' - register: this - changed_when: false - run_once: true - until: this.rc == 0 - retries: 5 - -- name: Test metallb-system resources - command: >- - k3s kubectl -n metallb-system get {{ item }} - changed_when: false - run_once: true - with_items: - - IPAddressPool - - L2Advertisement +- name: Deploy metallb pool + include_tasks: metallb.yml - name: Remove tmp directory used for manifests file: diff --git a/roles/k3s/post/tasks/metallb.yml b/roles/k3s/post/tasks/metallb.yml new file mode 100644 index 0000000..2bbab5a --- /dev/null +++ b/roles/k3s/post/tasks/metallb.yml @@ -0,0 +1,89 @@ +--- +- name: Create manifests directory for temp configuration + file: + path: /tmp/k3s + state: directory + owner: "{{ ansible_user }}" + mode: 0755 + with_items: "{{ groups['master'] }}" + run_once: true + +- name: Copy metallb CRs manifest to first master + 
template:
+    src: "metallb.crs.j2"
+    dest: "/tmp/k3s/metallb-crs.yaml"
+    owner: "{{ ansible_user }}"
+    mode: 0755
+  with_items: "{{ groups['master'] }}"
+  run_once: true
+
+- name: Test metallb-system namespace
+  command: >-
+    k3s kubectl get namespace metallb-system
+  changed_when: false
+  with_items: "{{ groups['master'] }}"
+  run_once: true
+
+- name: Wait for MetalLB resources
+  command: >-
+    k3s kubectl wait {{ item.resource }}
+    --namespace='metallb-system'
+    {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
+    {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
+    {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
+    --timeout='{{ metal_lb_available_timeout }}'
+  changed_when: false
+  run_once: true
+  with_items:
+    - description: controller
+      resource: deployment
+      name: controller
+      condition: --for condition=Available=True
+    - description: webhook service
+      resource: pod
+      selector: component=controller
+      condition: --for=jsonpath='{.status.phase}'=Running
+    - description: pods in replica sets
+      resource: pod
+      selector: component=controller,app=metallb
+      condition: --for condition=Ready
+    - description: ready replicas of controller
+      resource: replicaset
+      selector: component=controller,app=metallb
+      condition: --for=jsonpath='{.status.readyReplicas}'=1
+    - description: fully labeled replicas of controller
+      resource: replicaset
+      selector: component=controller,app=metallb
+      condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
+    - description: available replicas of controller
+      resource: replicaset
+      selector: component=controller,app=metallb
+      condition: --for=jsonpath='{.status.availableReplicas}'=1
+  loop_control:
+    label: "{{ item.description }}"
+
+- name: Test metallb-system webhook-service endpoint
+  command: >-
+    k3s kubectl -n metallb-system get endpoints webhook-service
+  changed_when: false
+  with_items: "{{ groups['master'] }}"
+  run_once: true
+
+- name: Apply metallb CRs
+  
command: >- + k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml + --timeout='{{ metal_lb_available_timeout }}' + register: this + changed_when: false + run_once: true + until: this.rc == 0 + retries: 5 + +- name: Test metallb-system resources + command: >- + k3s kubectl -n metallb-system get {{ item }} + changed_when: false + run_once: true + with_items: + - IPAddressPool + - L2Advertisement