diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 986c9cb..fdf9187 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -35,7 +35,7 @@ k3s_version: ""
 ansible_user: NA
 systemd_dir: ""
 
-flannel_iface: ""
+container_iface: ""
 
 apiserver_endpoint: ""
 
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
index 5b923f7..4a5d44d 100644
--- a/inventory/sample/group_vars/all.yml
+++ b/inventory/sample/group_vars/all.yml
@@ -7,8 +7,14 @@ systemd_dir: /etc/systemd/system
 # Set your timezone
 system_timezone: "Your/Timezone"
 
-# interface which will be used for flannel
-flannel_iface: "eth0"
+# node interface which will be used by the container network interface (flannel or calico)
+container_iface: "eth0"
+
+# set use_calico to true to use the tigera operator/calico instead of the default flannel CNI
+# install reference: https://docs.tigera.io/calico/latest/getting-started/kubernetes/k3s/multi-node-install#install-calico
+use_calico: false
+calico_cidr: "10.52.0.0/16" # pod cidr pool
+calico_tag: "v3.27.0" # calico version tag
 
 # apiserver_endpoint is virtual ip-address which will be configured on each master
 apiserver_endpoint: "192.168.30.222"
@@ -20,23 +26,30 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
 # The IP on which the node is reachable in the cluster.
 # Here, a sensible default is provided, you can still override
 # it for each of your hosts, though.
-k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
+k3s_node_ip: '{{ ansible_facts[container_iface]["ipv4"]["address"] }}'
 
 # Disable the taint manually by setting: k3s_master_taint = false
 k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
 
 # these arguments are recommended for servers as well as agents:
 extra_args: >-
-  --flannel-iface={{ flannel_iface }}
+  {{ '--flannel-iface=' + container_iface if not use_calico else '' }}
   --node-ip={{ k3s_node_ip }}
 
 # change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
+# the contents of the if block are also required when using calico
 extra_server_args: >-
   {{ extra_args }}
   {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
+  {% if use_calico %}
+  --flannel-backend=none
+  --disable-network-policy
+  --cluster-cidr={{ calico_cidr }}
+  {% endif %}
   --tls-san {{ apiserver_endpoint }}
   --disable servicelb
   --disable traefik
+
 extra_agent_args: >-
   {{ extra_args }}
 
diff --git a/molecule/default/overrides.yml b/molecule/default/overrides.yml
index 4eea472..7bd9a56 100644
--- a/molecule/default/overrides.yml
+++ b/molecule/default/overrides.yml
@@ -6,7 +6,7 @@
       ansible.builtin.set_fact:
         # See:
         # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
-        flannel_iface: eth1
+        container_iface: eth1
 
         # The test VMs might be a bit slow, so we give them more time to join the cluster:
         retry_count: 45
diff --git a/molecule/ipv6/overrides.yml b/molecule/ipv6/overrides.yml
index 44bbc07..7ac7be3 100644
--- a/molecule/ipv6/overrides.yml
+++ b/molecule/ipv6/overrides.yml
@@ -6,7 +6,7 @@
       ansible.builtin.set_fact:
         # See:
         # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
-        flannel_iface: eth1
+        container_iface: eth1
 
         # In this scenario, we have multiple interfaces that the VIP could be
         # broadcasted on. Since we have assigned a dedicated private network
@@ -27,13 +27,13 @@
           - fdad:bad:ba55::1b:0/112
           - 192.168.123.80-192.168.123.90
 
-        # k3s_node_ip is by default set to the IPv4 address of flannel_iface.
+        # k3s_node_ip is by default set to the IPv4 address of container_iface.
         # We want IPv6 addresses here of course, so we just specify them
         # manually below.
         k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}"
 
     - name: Override host variables (2/2)
-      # Since "extra_args" depends on "k3s_node_ip" and "flannel_iface" we have
+      # Since "extra_args" depends on "k3s_node_ip" and "container_iface" we have
       # to set this AFTER overriding the both of them.
       ansible.builtin.set_fact:
         # A few extra server args are necessary:
diff --git a/molecule/ipv6/prepare.yml b/molecule/ipv6/prepare.yml
index cea50d8..dfeeba7 100644
--- a/molecule/ipv6/prepare.yml
+++ b/molecule/ipv6/prepare.yml
@@ -30,7 +30,7 @@
         name: net.ipv6.conf.{{ item }}.accept_dad
         value: "0"
       with_items:
-        - "{{ flannel_iface }}"
+        - "{{ container_iface }}"
 
     - name: Write IPv4 configuration
       ansible.builtin.template:
diff --git a/molecule/ipv6/templates/55-flannel-ipv4.yaml.j2 b/molecule/ipv6/templates/55-flannel-ipv4.yaml.j2
index 6f68777..45bd56e 100644
--- a/molecule/ipv6/templates/55-flannel-ipv4.yaml.j2
+++ b/molecule/ipv6/templates/55-flannel-ipv4.yaml.j2
@@ -3,6 +3,6 @@ network:
   version: 2
   renderer: networkd
   ethernets:
-    {{ flannel_iface }}:
+    {{ container_iface }}:
       addresses:
         - {{ node_ipv4 }}/24
diff --git a/molecule/single_node/overrides.yml b/molecule/single_node/overrides.yml
index 799275e..191ad6f 100644
--- a/molecule/single_node/overrides.yml
+++ b/molecule/single_node/overrides.yml
@@ -6,7 +6,7 @@
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
-        flannel_iface: eth1
+        container_iface: eth1
 
        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45
diff --git a/roles/k3s_server_post/tasks/calico.yml b/roles/k3s_server_post/tasks/calico.yml
new file mode 100644
index 0000000..36ac1dd
--- /dev/null
+++ b/roles/k3s_server_post/tasks/calico.yml
@@ -0,0 +1,99 @@
+---
+- block:
+    - name: Create manifests directory on first master
+      ansible.builtin.file:
+        path: /tmp/k3s
+        state: directory
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
+      ansible.builtin.get_url:
+        url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
+        dest: "/tmp/k3s/tigera-operator.yaml"
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: Copy Calico custom resources manifest to first master
+      ansible.builtin.template:
+        src: "calico.crs.j2"
+        dest: /tmp/k3s/custom-resources.yaml
+
+    - name: Deploy or replace Tigera Operator
+      block:
+        - name: Deploy Tigera Operator
+          ansible.builtin.command:
+            cmd: kubectl create -f /tmp/k3s/tigera-operator.yaml
+          register: create_operator
+          changed_when: "'created' in create_operator.stdout"
+          failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
+      rescue:
+        - name: Replace existing Tigera Operator
+          ansible.builtin.command:
+            cmd: kubectl replace -f /tmp/k3s/tigera-operator.yaml
+          register: replace_operator
+          changed_when: "'replaced' in replace_operator.stdout"
+          failed_when: "'Error' in replace_operator.stderr"
+
+    - name: Wait for Tigera Operator resources
+      ansible.builtin.command: >-
+        k3s kubectl wait {{ item.type }}/{{ item.name }}
+        --namespace='tigera-operator'
+        --for=condition=Available=True
+        --timeout=7s
+      register: tigera_result
+      changed_when: false
+      until: tigera_result is succeeded
+      retries: 7
+      delay: 7
+      with_items:
+        - { name: tigera-operator, type: deployment }
+      loop_control:
+        label: "{{ item.type }}/{{ item.name }}"
+
+    - name: Deploy Calico custom resources
+      block:
+        - name: Deploy custom resources for Calico
+          ansible.builtin.command:
+            cmd: kubectl create -f /tmp/k3s/custom-resources.yaml
+          register: create_cr
+          changed_when: "'created' in create_cr.stdout"
+          failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
+      rescue:
+        - name: Apply new Calico custom resource manifest
+          ansible.builtin.command:
+            cmd: kubectl apply -f /tmp/k3s/custom-resources.yaml
+          register: apply_cr
+          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
+          failed_when: "'Error' in apply_cr.stderr"
+
+    - name: Wait for Calico system resources to be available
+      ansible.builtin.command: >-
+        {% if item.type == 'daemonset' %}
+        k3s kubectl wait pods
+        --namespace='{{ item.namespace }}'
+        --selector={{ item.selector }}
+        --for=condition=Ready
+        {% else %}
+        k3s kubectl wait {{ item.type }}/{{ item.name }}
+        --namespace='{{ item.namespace }}'
+        --for=condition=Available
+        {% endif %}
+        --timeout=7s
+      register: cr_result
+      changed_when: false
+      until: cr_result is succeeded
+      retries: 30
+      delay: 7
+      with_items:
+        - { name: calico-typha, type: deployment, namespace: calico-system }
+        - { name: calico-kube-controllers, type: deployment, namespace: calico-system }
+        - { name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system }
+        - { name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system }
+        - { name: calico-apiserver, type: deployment, selector: 'k8s-app=calico-apiserver', namespace: calico-apiserver }
+      loop_control:
+        label: "{{ item.type }}/{{ item.name }}"
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+  run_once: true # stops "skipped" log spam
diff --git a/roles/k3s_server_post/tasks/main.yml b/roles/k3s_server_post/tasks/main.yml
index f88dc08..1cb6122 100644
--- a/roles/k3s_server_post/tasks/main.yml
+++ b/roles/k3s_server_post/tasks/main.yml
@@ -1,4 +1,9 @@
 ---
+- name: Deploy calico
+  include_tasks: calico.yml
+  tags: calico
+  when: use_calico
+
 - name: Deploy metallb pool
   include_tasks: metallb.yml
   tags: metallb
diff --git a/roles/k3s_server_post/templates/calico.crs.j2 b/roles/k3s_server_post/templates/calico.crs.j2
new file mode 100644
index 0000000..d88b2c6
--- /dev/null
+++ b/roles/k3s_server_post/templates/calico.crs.j2
@@ -0,0 +1,28 @@
+# This section includes base Calico installation configuration.
+# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+  name: default
+spec:
+  # Configures Calico networking.
+  calicoNetwork:
+    # Note: The ipPools section cannot be modified post-install.
+    ipPools:
+      - blockSize: {{ calico_blockSize | default('26') }}
+        cidr: {{ calico_cidr | default('10.52.0.0/16') }}
+        encapsulation: {{ calico_encapsulation | default('VXLANCrossSubnet') }}
+        natOutgoing: {{ calico_natOutgoing | default('Enabled') }}
+        nodeSelector: {{ calico_nodeSelector | default('all()') }}
+    nodeAddressAutodetectionV4:
+      interface: {{ container_iface | default('eth0') }}
+
+---
+
+# This section configures the Calico API server.
+# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}
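Note (not part of the patch): with the defaults added above, use_calico stays false and the role behaves exactly as before. A minimal sketch of an inventory override that switches a cluster to Calico might look like the following; the my-cluster path and the values are illustrative only:

    # inventory/my-cluster/group_vars/all.yml (illustrative)
    container_iface: "eth0"
    use_calico: true
    calico_cidr: "10.52.0.0/16" # pod CIDR; pick a range that does not overlap your LAN or the MetalLB range
    calico_tag: "v3.27.0"

Because the tasks in roles/k3s_server_post/tasks/calico.yml carry tags: calico, the Calico deployment can also be re-applied to an existing cluster by limiting the playbook run to that tag (--tags calico).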