Compare commits


16 Commits

Author SHA1 Message Date
Techno Tim
6695d13683 upgrade k3s to v1.24.4+k3s1 (#64)
* feat(k3s): Upgrade to v1.24.4+k3s1
* feat(metallb): updated to v0.13.5
2022-09-01 21:20:25 -05:00
Techno Tim
74e1dc1dfe Pin GitHub Actions to SHA + Dependabot (#62)
* feat(repo): Add dependabot

* fix(ci): clean up

* fix(gh-actions): pin to sha

* fix(lint): fixing yaml lint

* feat(repo): Add dependabot

* fix(vagrant): up retry count to 60 because gh actions are sloooooow
2022-08-30 23:15:15 -05:00
Techno Tim
56f8f21850 fix(ansible): Install services separate from config (#63) 2022-08-30 21:44:55 -05:00
Timothy Stewart
117c608a73 fix(ansible): added longer wait with todo 2022-08-29 23:16:13 -05:00
niki-on-github
e28d8f38e2 add ansible.posix module to requirements.yml (#59)
Co-authored-by: arch <arch@local>
Co-authored-by: Techno Tim <timothystewart6@gmail.com>
2022-08-29 22:58:57 -05:00
Simon Leiner
9d8a5cc2b8 Execute Vagrant cluster in CI (#57) 2022-08-29 19:45:07 -05:00
Techno Tim
2296959894 fix(ci): Fix Linting (#61) 2022-08-28 20:36:05 -05:00
Timothy Stewart
6d793c5c96 fix(ansible): add wait 2022-08-28 17:49:38 -05:00
Timothy Stewart
47ac514dc6 fix(ansible): fix lint 2022-08-28 16:42:07 -05:00
Timothy Stewart
611cf5ab0b fix(ansible): fix lint 2022-08-28 16:32:52 -05:00
Timothy Stewart
c82cbfc501 fix(ansible): fix lint 2022-08-28 16:29:04 -05:00
Timothy Stewart
f603a048c3 fix(ansible): fix lint 2022-08-28 16:26:46 -05:00
Timothy Stewart
4b959719ba fix(ansible): run task on one master 2022-08-28 16:00:10 -05:00
Timothy Stewart
db8fbd9447 chore(lint): Fix yaml lint 2022-08-28 14:27:22 -05:00
Techno Tim
aa05ab153e fix(ansible): Refactored ansible steps to now install metallb in post… (#58)
* fix(ansible): Refactored ansible steps to now install metallb in post task and verify
2022-08-28 14:25:09 -05:00
Simon Leiner
370e19169b Print fewer logs when removing manifests (#55) 2022-08-23 23:26:08 -05:00
17 changed files with 349 additions and 121 deletions

11
.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,11 @@
---
version: 2
updates:
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "auto"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major"]

View File

@@ -1,31 +1,30 @@
---
name: Lint
'on':
name: Linting
on:
pull_request:
push:
branches:
- master
jobs:
test:
name: Lint
ansible-lint:
name: YAML Lint + Ansible Lint
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v2
- name: Check out the codebase
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
- name: Set up Python 3.7.
uses: actions/setup-python@v2
- name: Set up Python 3.x
uses: actions/setup-python@b55428b1882923874294fa556849718a1d7f2ca5 #4.0.2
with:
python-version: '3.x'
- name: Install test dependencies.
- name: Install test dependencies
run: pip3 install yamllint ansible-lint ansible
- name: Run yamllint.
- name: Run yamllint
run: yamllint .
- name: Run ansible-lint.
- name: Run ansible-lint
run: ansible-lint
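
The lint workflow now pins both actions to full commit SHAs, keeping the human-readable tag in a trailing comment. A minimal sketch of how such a pin can be looked up before editing the workflow, using actions/checkout v3.0.2 from the diff as the example:

# Resolve the commit behind a release tag before pinning it in `uses:`
git ls-remote https://github.com/actions/checkout refs/tags/v3.0.2
# The printed object id (for annotated tags, the peeled ^{} line) is the SHA that goes into `uses: ...@<sha>  # 3.0.2`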

69
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,69 @@
---
name: Test
on:
pull_request:
push:
branches:
- master
jobs:
vagrant:
name: Vagrant
runs-on: macos-12
env:
HOMEBREW_NO_INSTALL_CLEANUP: 1
VAGRANT_CWD: ${{ github.workspace }}/vagrant
steps:
- name: Check out the codebase
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2
- name: Install Ansible
run: brew install ansible
- name: Install role dependencies
run: ansible-galaxy install -r collections/requirements.yml
- name: Configure VirtualBox
run: >-
sudo mkdir -p /etc/vbox &&
echo "* 192.168.30.0/24" | sudo tee -a /etc/vbox/networks.conf > /dev/null
- name: Cache Vagrant boxes
uses: actions/cache@fd5de65bc895cf536527842281bea11763fefd77 # 3.0.8
with:
path: |
~/.vagrant.d/boxes
key: vagrant-boxes-${{ hashFiles('**/Vagrantfile') }}
restore-keys: |
vagrant-boxes
- name: Create virtual machines
run: vagrant up
timeout-minutes: 10
- name: Provision cluster using Ansible
# Since Ansible sets up _all_ machines, it is sufficient to run it only
# once (i.e, for a single node - we are choosing control1 here)
run: vagrant provision control1 --provision-with ansible
timeout-minutes: 25
- name: Set up kubectl on the host
run: brew install kubectl &&
mkdir -p ~/.kube &&
vagrant ssh control1 --command "cat ~/.kube/config" > ~/.kube/config
- name: Show cluster nodes
run: kubectl describe -A nodes
- name: Show cluster pods
run: kubectl describe -A pods
- name: Test cluster
run: $VAGRANT_CWD/test_cluster.py --verbose --locals
timeout-minutes: 5
- name: Destroy virtual machines
if: always() # do this even if a step before has failed
run: vagrant destroy --force
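
The "Configure VirtualBox" step above is needed because VirtualBox 6.1.28 and later only allow host-only networks inside 192.168.56.0/21 by default; any other range has to be whitelisted in /etc/vbox/networks.conf. As a sketch, after that step the runner's file contains (assuming the file did not exist beforehand):

# /etc/vbox/networks.conf on the macos-12 runner
* 192.168.30.0/24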

View File

@@ -1,3 +1,4 @@
---
collections:
- name: community.general
- name: ansible.posix
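
The newly listed ansible.posix collection is pulled in by the existing "Install role dependencies" CI step; locally the same thing can be done with the explicit collection subcommand (a sketch, assuming Ansible is already installed):

# Install the collections declared in collections/requirements.yml, including ansible.posix
ansible-galaxy collection install -r collections/requirements.yml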

View File

@@ -1,5 +1,5 @@
---
k3s_version: v1.24.3+k3s1
k3s_version: v1.24.4+k3s1
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
@@ -25,8 +25,8 @@ extra_agent_args: ""
kube_vip_tag_version: "v0.5.0"
# image tag for metal lb
metal_lb_speaker_tag_version: "v0.13.4"
metal_lb_controller_tag_version: "v0.13.4"
metal_lb_speaker_tag_version: "v0.13.5"
metal_lb_controller_tag_version: "v0.13.5"
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
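
The speaker and controller tags move in lockstep with the bundled MetalLB manifest. A quick post-deployment sanity check (a sketch: the deployment name "controller" comes from the post tasks further down, and the container index 0 is an assumption):

# Confirm the running controller image matches metal_lb_controller_tag_version
kubectl -n metallb-system get deployment controller -o jsonpath='{.spec.template.spec.containers[0].image}'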

View File

@@ -40,7 +40,8 @@
mode: 0644
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Copy metallb namespace manifest to first master
# these will be copied and installed now, then tested later and apply config
- name: Copy metallb namespace to first master
template:
src: "metallb.namespace.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
@@ -49,19 +50,10 @@
mode: 0644
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Copy metallb ConfigMap manifest to first master
- name: Copy metallb namespace to first master
template:
src: "metallb.ipaddresspool.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-configmap.yaml"
owner: root
group: root
mode: 0644
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Copy metallb main manifest to first master
template:
src: "metallb.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb.yaml"
src: "metallb.crds.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root
mode: 0644
@@ -93,6 +85,7 @@
name: k3s-init
state: stopped
failed_when: false
when: not ansible_check_mode
- name: Copy K3s service file
register: k3s_service
@@ -184,7 +177,6 @@
file_type: directory
register: k3s_server_manifests_directories
- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
file:
path: "{{ item.path }}"
@@ -192,3 +184,5 @@
with_items:
- "{{ k3s_server_manifests.files }}"
- "{{ k3s_server_manifests_directories.files }}"
loop_control:
label: "{{ item.path }}"

View File

@@ -1118,85 +1118,6 @@ metadata:
name: speaker
namespace: metallb-system
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: controller
spec:
allowPrivilegeEscalation: false
allowedCapabilities: []
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65535
min: 1
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: speaker
spec:
allowPrivilegeEscalation: false
allowedCapabilities:
- NET_RAW
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
hostIPC: false
hostNetwork: true
hostPID: false
hostPorts:
- max: 7472
min: 7472
- max: 7946
min: 7946
privileged: true
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@@ -1789,6 +1710,7 @@ webhooks:
apiVersions:
- v1beta1
operations:
- CREATE
- DELETE
resources:
- bfdprofiles
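
The blocks removed above are MetalLB's two PodSecurityPolicy objects; PSP has been deprecated since Kubernetes v1.21 and is removed entirely in v1.25, so the manifest no longer ships them. A sketch of confirming nothing on a running cluster still depends on them:

# Is the policy/v1beta1 PodSecurityPolicy API still served, and are any PSPs left?
kubectl api-resources --api-group=policy
kubectl get podsecuritypolicies 2>/dev/null || echo "PodSecurityPolicy API not available"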

View File

@@ -0,0 +1,107 @@
---
- name: Create manifests directory for temp configuration
file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0644
with_items: "{{ groups['master'] }}"
run_once: true
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
owner: root
group: root
mode: 0644
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for metallb controller to be running
command: >-
kubectl wait deployment -n metallb-system controller --for condition=Available=True --timeout=60s
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for metallb webhook service to be running
command: >-
kubectl wait -n metallb-system --for=jsonpath='{.status.phase}'=Running pods \
--selector component=controller --timeout=60s
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for metallb pods in replicasets
command: >-
kubectl wait pods -n metallb-system --for condition=Ready \
--selector component=controller,app=metallb --timeout=60s
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for the metallb controller readyReplicas
command: >-
kubectl wait -n metallb-system --for=jsonpath='{.status.readyReplicas}'=1 replicasets \
--selector component=controller,app=metallb --timeout=60s
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for the metallb controller fullyLabeledReplicas
command: >-
kubectl wait -n metallb-system --for=jsonpath='{.status.fullyLabeledReplicas}'=1 replicasets \
--selector component=controller,app=metallb --timeout=60s
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Wait for the metallb controller availableReplicas
command: >-
kubectl wait -n metallb-system --for=jsonpath='{.status.availableReplicas}'=1 replicasets \
--selector component=controller,app=metallb --timeout=60s
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system webhook-service endpoint
command: >-
k3s kubectl -n metallb-system get endpoints webhook-service
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Apply metallb CRs
command: >-
k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system IPAddressPool
command: >-
k3s kubectl -n metallb-system get IPAddressPool
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Test metallb-system L2Advertisement
command: >-
k3s kubectl -n metallb-system get L2Advertisement
changed_when: false
with_items: "{{ groups['master'] }}"
run_once: true
- name: Remove tmp directory used for manifests
file:
path: /tmp/k3s
state: absent
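
The new post tasks gate the MetalLB custom resources on the controller actually being ready, since the CRs are admitted by MetalLB's webhook; the jsonpath form of kubectl wait used here needs kubectl v1.23 or newer. A condensed sketch of the same sequence run by hand on the first master (assuming a working kubeconfig):

# Wait for the controller and its webhook, then apply the CRs
kubectl wait deployment -n metallb-system controller --for condition=Available=True --timeout=60s
kubectl wait pods -n metallb-system --selector component=controller --for=jsonpath='{.status.phase}'=Running --timeout=60s
kubectl -n metallb-system get endpoints webhook-service
kubectl apply -f /tmp/k3s/metallb-crs.yaml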

View File

@@ -1,3 +1,3 @@
---
- name: reboot
- name: Reboot
reboot:

View File

@@ -6,3 +6,4 @@
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
notify: reboot
when: not ansible_check_mode

View File

@@ -13,7 +13,6 @@
- name: Flush iptables before changing to iptables-legacy
iptables:
flush: true
changed_when: false # iptables flush always returns changed
- name: Changing to iptables-legacy
alternatives:

View File

@@ -6,7 +6,8 @@
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
notify: reboot
when: not ansible_check_mode
- name: Install linux-modules-extra-raspi
apt: name=linux-modules-extra-raspi state=present
when: raspberry_pi
when: (raspberry_pi) and (not ansible_check_mode)
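
The added "not ansible_check_mode" guards skip these tasks entirely when the playbook is run in check mode, instead of letting them error out. A sketch of such a dry run (the playbook and inventory names here are assumptions; substitute the real entry point):

# Dry-run the playbook; guarded tasks are skipped rather than failing in check mode
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini --check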

View File

@@ -10,7 +10,7 @@
- k3s-node
- k3s-init
- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
- name: RUN pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
register: pkill_containerd_shim_runc
command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
changed_when: "pkill_containerd_shim_runc.rc == 0"
@@ -47,13 +47,18 @@
- /usr/local/bin/k3s
- /var/lib/cni/
- name: daemon_reload
- name: Reload daemon_reload
systemd:
daemon_reload: yes
- name: Remove linux-modules-extra-raspi
apt: name=linux-modules-extra-raspi state=absent
- name: Remove tmp directory used for manifests
file:
path: /tmp/k3s
state: absent
- name: Reboot and wait for node to come back up
reboot:
reboot_timeout: 3600

View File

@@ -17,3 +17,8 @@
become: yes
roles:
- role: k3s/node
- hosts: master
become: yes
roles:
- role: k3s/post

16
vagrant/Vagrantfile vendored
View File

@@ -3,12 +3,12 @@
Vagrant.configure("2") do |config|
# General configuration
config.vm.box = "generic/ubuntu2110"
config.vm.box = "generic/ubuntu2204"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.ssh.insert_key = false
config.vm.provider :virtualbox do |v|
v.memory = 4096
v.memory = 2048
v.cpus = 2
v.linked_clone = true
end
@@ -50,7 +50,7 @@ Vagrant.configure("2") do |config|
"master" => ["control1", "control2", "control3"],
"node" => ["node1", "node2"],
"k3s_cluster:children" => ["master", "node"],
"k3s_cluster:vars" => {"k3s_version" => "v1.23.4+k3s1",
"k3s_cluster:vars" => {"k3s_version" => "v1.24.4+k3s1",
"ansible_user" => "vagrant",
"systemd_dir" => "/etc/systemd/system",
"flannel_iface" => "eth1",
@@ -58,11 +58,11 @@ Vagrant.configure("2") do |config|
"k3s_token" => "supersecret",
"extra_server_args" => "--node-ip={{ ansible_eth1.ipv4.address }} --flannel-iface={{ flannel_iface }} --no-deploy servicelb --no-deploy traefik",
"extra_agent_args" => "--flannel-iface={{ flannel_iface }}",
"kube_vip_tag_version" => "v0.4.2",
"metal_lb_speaker_tag_version" => "v0.12.1",
"metal_lb_controller_tag_version" => "v0.12.1",
"kube_vip_tag_version" => "v0.5.0",
"metal_lb_speaker_tag_version" => "v0.13.4",
"metal_lb_controller_tag_version" => "v0.13.4",
"metal_lb_ip_range" => "192.168.30.80-192.168.30.90",
"retry_count" => "30"}
"retry_count" => "60"}
}
ansible.host_vars = {
"control1" => {
@@ -76,4 +76,4 @@ Vagrant.configure("2") do |config|
}
}
end
end
end
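
The Vagrant environment now mirrors the CI job: Ubuntu 22.04 boxes, 2 GB of memory per VM, k3s v1.24.4+k3s1, and a higher retry count. A sketch of reproducing the CI run locally (assuming VirtualBox, Vagrant, and Ansible are installed; these are the same commands test.yml runs):

# Bring the five-node test cluster up and provision it once via control1
cd vagrant
vagrant up
vagrant provision control1 --provision-with ansible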

114
vagrant/test_cluster.py Executable file
View File

@@ -0,0 +1,114 @@
#!/usr/bin/env python3
# Perform a few tests on a cluster created with this playbook.
# To simplify test execution, the script does not depend on any third-party
# packages, only the Python standard library.
import json
import subprocess
import unittest
from pathlib import Path
from time import sleep
from warnings import warn
VAGRANT_DIR = Path(__file__).parent.absolute()
PLAYBOOK_DIR = VAGRANT_DIR.parent.absolute()
class TestK3sCluster(unittest.TestCase):
def _kubectl(self, args: str, json_out: bool = True) -> dict | None:
cmd = "kubectl"
if json_out:
cmd += " -o json"
cmd += f" {args}"
result = subprocess.run(cmd, capture_output=True, shell=True, check=True)
if json_out:
return json.loads(result.stdout)
else:
return None
def _curl(self, url: str) -> str:
options = [
"--silent", # no progress info
"--show-error", # ... but errors should still be shown
"--fail", # set exit code on error
"--location", # follow redirects
]
cmd = f'curl {" ".join(options)} "{url}"'
result = subprocess.run(cmd, capture_output=True, shell=True, check=True)
output = result.stdout.decode("utf-8")
return output
def _apply_manifest(self, manifest_file: Path) -> dict:
apply_result = self._kubectl(
f'apply --filename="{manifest_file}" --cascade="background"'
)
self.addCleanup(
lambda: self._kubectl(
f'delete --filename="{manifest_file}"',
json_out=False,
)
)
return apply_result
@staticmethod
def _retry(function, retries: int = 5, seconds_between_retries=1):
for retry in range(1, retries + 1):
try:
return function()
except Exception as exc:
if retry < retries:
sleep(seconds_between_retries)
continue
else:
raise exc
def _get_load_balancer_ip(
self,
service: str,
namespace: str = "default",
) -> str | None:
svc_description = self._kubectl(
f'get --namespace="{namespace}" service "{service}"'
)
ip = svc_description["status"]["loadBalancer"]["ingress"][0]["ip"]
return ip
def test_nodes_exist(self):
out = self._kubectl("get nodes")
node_names = {item["metadata"]["name"] for item in out["items"]}
self.assertEqual(
node_names,
{"control1", "control2", "control3", "node1", "node2"},
)
def test_ip_address_pool_exists(self):
out = self._kubectl("get --all-namespaces IpAddressPool")
pools = out["items"]
self.assertGreater(len(pools), 0)
def test_nginx_example_page(self):
# Deploy the manifests to the cluster
deployment = self._apply_manifest(PLAYBOOK_DIR / "example" / "deployment.yml")
service = self._apply_manifest(PLAYBOOK_DIR / "example" / "service.yml")
# Assert that the dummy page is available
metallb_ip = self._retry(
lambda: self._get_load_balancer_ip(service["metadata"]["name"])
)
# Now that an IP address was assigned, let's reload the service description:
service = self._kubectl(f'get service "{service["metadata"]["name"]}"')
metallb_port = service["spec"]["ports"][0]["port"]
response_body = self._retry(
lambda: self._curl(f"http://{metallb_ip}:{metallb_port}/")
)
self.assertIn("Welcome to nginx!", response_body)
if __name__ == "__main__":
unittest.main()
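
This test suite is what the CI "Test cluster" step runs via $VAGRANT_CWD/test_cluster.py --verbose --locals. A sketch of running it by hand once kubectl on the host points at the cluster (note the `dict | None` annotations need Python 3.10 or newer):

# Run the cluster checks against an already-provisioned Vagrant cluster
cd vagrant
./test_cluster.py --verbose --locals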