Mirror of https://github.com/techno-tim/k3s-ansible.git (synced 2025-12-25 18:23:05 +01:00)

Compare commits: 65 commits, v1.24.9+k3 ... c6f4f38a5d
Commits in this range:

c6f4f38a5d, 18044d23a4, 7669fd4721, 2cd03f38f2, 8e1265fbae, cddbfc8e40, f6ee0c72ef, e7ba494a00,
70e658cf98, 7badfbd7bd, e880f08d26, 95b2836dfc, 505c2eeff2, 9b6d551dd6, a64e882fb7, 38e773315b,
70ddf7b63c, fb3128a783, 2e318e0862, 0607eb8aa4, a9904d1562, 9707bc8a58, e635bd2626, 1aabb5a927,
215690b55b, bd44a9b126, 8d61fe81e5, c0ff304f22, 83077ecdd1, 33ae0d4970, edd4838407, 5c79ea9b71,
3d204ad851, 13bd868faa, c564a8562a, 0d6d43e7ca, c0952288c2, 1c9796e98b, 288c4089e0, 49f0a2ce6b,
6c4621bd56, 3e16ab6809, 83fe50797c, 2db0b3024c, 6b2af77e74, d1d1bc3d91, 3a1a7a19aa, 030eeb4b75,
4aeeb124ef, 511c020bec, c47da38b53, 6448948e9f, 7bc198ab26, 65bbc8e2ac, dc2976e7f6, 5a7ba98968,
10c6ef1d57, ed4d888e3d, 49d6d484ae, 96c49c864e, 60adb1de42, e023808f2f, 511ec493d6, be3e72e173,
e33cbe52c1
@@ -13,5 +13,8 @@ exclude_paths:
   - 'molecule/**/prepare.yml'
   - 'molecule/**/reset.yml'
+
+  # The file was generated by galaxy ansible - don't mess with it.
+  - 'galaxy.yml'
 
 skip_list:
   - 'fqcn-builtins'
.github/ISSUE_TEMPLATE.md (vendored, 2 changes)
@@ -35,7 +35,7 @@ k3s_version: ""
 ansible_user: NA
 systemd_dir: ""
 
-flannel_iface: ""
+container_iface: ""
 
 apiserver_endpoint: ""
 
.github/workflows/lint.yml (vendored, 2 changes)
@@ -7,7 +7,7 @@ jobs:
     name: Pre-Commit
     runs-on: ubuntu-latest
     env:
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"
 
     steps:
       - name: Check out the codebase
.github/workflows/test.yml (vendored, 3 changes)
@@ -14,7 +14,7 @@ jobs:
         - single_node
       fail-fast: false
     env:
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"
 
     steps:
       - name: Check out the codebase
@@ -71,6 +71,7 @@ jobs:
 
       - name: Test with molecule
         run: molecule test --scenario-name ${{ matrix.scenario }}
+        timeout-minutes: 90
         env:
           ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}
           ANSIBLE_SSH_RETRIES: 4
.gitignore (vendored, 1 change)
@@ -1,2 +1,3 @@
 .env/
 *.log
+ansible.cfg
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0  # v4.3.0
+    rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9  # frozen: v4.4.0
     hooks:
       - id: requirements-txt-fixer
       - id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
       - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
   - repo: https://github.com/adrienverge/yamllint.git
-    rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2  # v1.28.0
+    rev: b05e028c5881819161d11cb543fd96a30c06cceb  # frozen: v1.32.0
     hooks:
       - id: yamllint
         args: [-c=.yamllint]
   - repo: https://github.com/ansible-community/ansible-lint.git
-    rev: a058554b9bcf88f12ad09ab9fb93b267a214368f  # v6.8.6
+    rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a  # frozen: v6.17.2
     hooks:
       - id: ansible-lint
   - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a  # v0.8.0.4
+    rev: 375289a39f5708101b1f916eb729e8d6da96993f  # frozen: v0.9.0.5
     hooks:
       - id: shellcheck
   - repo: https://github.com/Lucas-C/pre-commit-hooks
-    rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9  # v1.3.1
+    rev: 12885e376b93dc4536ad68d156065601e4433665  # frozen: v1.5.1
     hooks:
       - id: remove-crlf
       - id: remove-tabs
   - repo: https://github.com/sirosen/texthooks
-    rev: 30d9af95631de0d7cff4e282bde9160d38bb0359  # 0.4.0
+    rev: c4ffd3e31669dd4fa4d31a23436cc13839730084  # frozen: 0.5.0
     hooks:
       - id: fix-smartquotes
@@ -6,4 +6,6 @@ rules:
     max: 120
     level: warning
   truthy:
-    allowed-values: ['true', 'false', 'yes', 'no']
+    allowed-values: ['true', 'false']
+ignore:
+  - galaxy.yml
README.md (32 changes)
@@ -4,11 +4,11 @@
 
 This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`.
 
-This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.chipzoller.dev/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
+This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.io/) to create a load balancer for control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.
 
 If you want more context on how this works, see:
 
-📄 [Documentation](https://docs.technotim.live/posts/k3s-etcd-ansible/) (including example commands)
+📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)
 
 📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)
 
@@ -28,7 +28,7 @@ on processor architecture:
 
 ## ✅ System requirements
 
-- Deployment environment must have Ansible 2.4.0+. If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://docs.technotim.live/posts/ansible-automation/).
+- Control Node (the machine you are running `ansible` commands) must have Ansible 2.11+ If you need a quick primer on Ansible [you can check out my docs and setting up Ansible](https://technotim.live/posts/ansible-automation/).
 
 - You will also need to install collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)
 
@@ -67,6 +67,8 @@ node
 
 If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).
 
+Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
+
 This requires at least k3s version `1.19.1` however the version is configurable by using the `k3s_version` variable.
 
 If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.
@@ -99,7 +101,7 @@ scp debian@master_ip:~/.kube/config ~/.kube/config
 
 ### 🔨 Testing your cluster
 
-See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
+See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
 
 ### Troubleshooting
 
@@ -116,6 +118,28 @@ You can find more information about it [here](molecule/README.md).
 
 This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/)
 
+## 🌌 Ansible Galaxy
+
+This collection can now be used in larger ansible projects.
+
+Instructions:
+
+- create or modify a file `collections/requirements.yml` in your project
+
+```yml
+collections:
+  - name: ansible.utils
+  - name: community.general
+  - name: ansible.posix
+  - name: kubernetes.core
+  - name: https://github.com/techno-tim/k3s-ansible.git
+    type: git
+    version: master
+```
+
+- install via `ansible-galaxy collection install -r ./collections/requirements.yml`
+- every role is now available via the prefix `techno_tim.k3s_ansible.` e.g. `techno_tim.k3s_ansible.lxc`
+
 ## Thanks 🤝
 
 This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas:
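Building on the README's Galaxy instructions in the diff above, here is a minimal sketch of consuming one of the collection's roles from a downstream playbook. Only the `techno_tim.k3s_ansible.` prefix and the `lxc` role name come from the README; the play name, host group, and file name are assumptions:

```yml
# hypothetical playbook in a project that installed the collection
- name: Use a role from the techno_tim.k3s_ansible collection  # illustrative play
  hosts: k3s_cluster  # assumes the inventory group used elsewhere in this repo
  become: true
  roles:
    - role: techno_tim.k3s_ansible.lxc  # role prefix and name as cited in the README
```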
ansible.cfg (deleted, 23 lines)
@@ -1,23 +0,0 @@
-[defaults]
-nocows = True
-roles_path = ./roles
-inventory = ./hosts.ini
-stdout_callback = yaml
-
-remote_tmp = $HOME/.ansible/tmp
-local_tmp = $HOME/.ansible/tmp
-timeout = 60
-host_key_checking = False
-deprecation_warnings = False
-callbacks_enabled = profile_tasks
-log_path = ./ansible.log
-
-[privilege_escalation]
-become = True
-
-[ssh_connection]
-scp_if_ssh = smart
-retries = 3
-ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s
-pipelining = True
-control_path = %(directory)s/%%h-%%r
ansible.example.cfg (new file, 2 lines)
@@ -0,0 +1,2 @@
+[defaults]
+inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook site.yml
galaxy.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
+### REQUIRED
+# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
+# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
+# underscores or numbers and cannot contain consecutive underscores
+namespace: techno_tim
+
+# The name of the collection. Has the same character restrictions as 'namespace'
+name: k3s_ansible
+
+# The version of the collection. Must be compatible with semantic versioning
+version: 1.0.0
+
+# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
+readme: README.md
+
+# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
+# @nicks:irc/im.site#channel'
+authors:
+  - your name <example@domain.com>
+
+
+### OPTIONAL but strongly recommended
+# A short summary description of the collection
+description: >
+  The easiest way to bootstrap a self-hosted High Availability Kubernetes
+  cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB,
+  and more.
+
+# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
+# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
+license:
+  - Apache-2.0
+
+# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
+# requirements as 'namespace' and 'name'
+tags:
+  - etcd
+  - high-availability
+  - k8s
+  - k3s
+  - k3s-cluster
+  - kube-vip
+  - kubernetes
+  - metallb
+  - rancher
+
+# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
+# collection label 'namespace.name'. The value is a version range
+# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
+# range specifiers can be set and are separated by ','
+dependencies:
+  ansible.utils: '*'
+  ansible.posix: '*'
+  community.general: '*'
+  kubernetes.core: '*'
+
+# The URL of the originating SCM repository
+repository: https://github.com/techno-tim/k3s-ansible
+
+# The URL to any online docs
+documentation: https://github.com/techno-tim/k3s-ansible
+
+# The URL to the homepage of the collection/project
+homepage: https://www.youtube.com/watch?v=CbkEWcUZ7zM
+
+# The URL to the collection issue tracker
+issues: https://github.com/techno-tim/k3s-ansible/issues
+
+# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
+# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
+# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
+# and '.git' are always filtered. Mutually exclusive with 'manifest'
+build_ignore: []
+
+# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
+# list of MANIFEST.in style
+# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
+# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
+# with 'build_ignore'
+# manifest: null
@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.24.9+k3s1
+k3s_version: v1.25.16+k3s4
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system
@@ -7,8 +7,14 @@ systemd_dir: /etc/systemd/system
 # Set your timezone
 system_timezone: "Your/Timezone"
 
-# interface which will be used for flannel
-flannel_iface: "eth0"
+# node interface which will be used for the container network interface (flannel or calico)
+container_iface: "eth0"
+
+# set use_calico to true to use tigera operator/calico instead of the default CNI flannel
+# install reference: https://docs.tigera.io/calico/latest/getting-started/kubernetes/k3s/multi-node-install#install-calico
+use_calico: false
+calico_cidr: "10.52.0.0/16"  # pod cidr pool
+calico_tag: "v3.27.0"  # calico version tag
 
 # apiserver_endpoint is virtual ip-address which will be configured on each master
 apiserver_endpoint: "192.168.30.222"
@@ -20,32 +26,117 @@ k3s_token: "some-SUPER-DEDEUPER-secret-password"
 # The IP on which the node is reachable in the cluster.
 # Here, a sensible default is provided, you can still override
 # it for each of your hosts, though.
-k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'
+k3s_node_ip: '{{ ansible_facts[container_iface]["ipv4"]["address"] }}'
 
 # Disable the taint manually by setting: k3s_master_taint = false
 k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"
 
 # these arguments are recommended for servers as well as agents:
 extra_args: >-
-  --flannel-iface={{ flannel_iface }}
+  {{ '--flannel-iface=' + container_iface if not use_calico else '' }}
   --node-ip={{ k3s_node_ip }}
 
 # change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
+# the contents of the if block is also required if using calico
 extra_server_args: >-
   {{ extra_args }}
   {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
+  {% if use_calico %}
+  --flannel-backend=none
+  --disable-network-policy
+  --cluster-cidr={{ calico_cidr }}
+  {% endif %}
   --tls-san {{ apiserver_endpoint }}
   --disable servicelb
   --disable traefik
 
 extra_agent_args: >-
   {{ extra_args }}
 
 # image tag for kube-vip
-kube_vip_tag_version: "v0.5.7"
+kube_vip_tag_version: "v0.5.12"
+
+# metallb type frr or native
+metal_lb_type: "native"
+
+# metallb mode layer2 or bgp
+metal_lb_mode: "layer2"
+
+# bgp options
+# metal_lb_bgp_my_asn: "64513"
+# metal_lb_bgp_peer_asn: "64512"
+# metal_lb_bgp_peer_address: "192.168.30.1"
 
 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.13.7"
-metal_lb_controller_tag_version: "v0.13.7"
+metal_lb_speaker_tag_version: "v0.13.9"
+metal_lb_controller_tag_version: "v0.13.9"
 
 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"
+
+# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
+# in your hosts.ini file.
+# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
+# Most notably, your containers must be privileged, and must not have nesting set to true.
+# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
+# containers are significantly more resource efficent compared to full VMs.
+# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
+# I would only really recommend using this if you have partiularly low powered proxmox nodes where the overhead of
+# VMs would use a significant portion of your available resources.
+proxmox_lxc_configure: false
+# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
+# set this value to some-user
+proxmox_lxc_ssh_user: root
+# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes
+proxmox_lxc_ct_ids:
+  - 200
+  - 201
+  - 202
+  - 203
+  - 204
+
+# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
+# (harbor / nexus / docker's official registry / etc).
+# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
+# or air-gapped environments where your nodes don't have internet access after the initial setup
+# (which is still needed for downloading the k3s binary and such).
+# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
+custom_registries: false
+# The registries can be authenticated or anonymous, depending on your registry server configuration.
+# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
+#   configs:
+#     "registry.domain.com":
+#       auth:
+#         username: yourusername
+#         password: yourpassword
+# The following is an example that pulls all images used in this playbook through your private registries.
+# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
+# in your deployments.
+# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
+# you can just remove those from the mirrors: section.
+custom_registries_yaml: |
+  mirrors:
+    docker.io:
+      endpoint:
+        - "https://registry.domain.com/v2/dockerhub"
+    quay.io:
+      endpoint:
+        - "https://registry.domain.com/v2/quayio"
+    ghcr.io:
+      endpoint:
+        - "https://registry.domain.com/v2/ghcrio"
+    registry.domain.com:
+      endpoint:
+        - "https://registry.domain.com"
+
+  configs:
+    "registry.domain.com":
+      auth:
+        username: yourusername
+        password: yourpassword
+
+# Only enable and configure these if you access the internet through a proxy
+# proxy_env:
+#   HTTP_PROXY: "http://proxy.domain.local:3128"
+#   HTTPS_PROXY: "http://proxy.domain.local:3128"
+#   NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
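As a worked example of the calico toggle introduced in the hunks above, here is a hedged sketch of the overrides a user could put in their own `group_vars/all.yml`. The variable names and sample values are taken from the diff itself; that exactly these three would be overridden together is an assumption:

```yml
# sketch: opting into calico instead of flannel, per the toggle added above
use_calico: true             # switches extra_args/extra_server_args to the calico branch
calico_cidr: "10.52.0.0/16"  # pod cidr pool (sample default from this file)
calico_tag: "v3.27.0"        # calico version tag (sample default from this file)
# With use_calico: true, extra_server_args additionally renders
# --flannel-backend=none --disable-network-policy --cluster-cidr=10.52.0.0/16
```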
inventory/sample/group_vars/proxmox.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
+---
+ansible_user: '{{ proxmox_lxc_ssh_user }}'
@@ -7,6 +7,11 @@
 192.168.30.41
 192.168.30.42
 
+# only required if proxmox_lxc_configure: true
+# must contain all proxmox instances that have a master or worker node
+# [proxmox]
+# 192.168.30.43
+
 [k3s_cluster:children]
 master
 node
@@ -4,8 +4,9 @@
   tasks:
     - name: Override host variables
       ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
-        flannel_iface: eth1
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
+        container_iface: eth1
 
         # The test VMs might be a bit slow, so we give them more time to join the cluster:
         retry_count: 45
@@ -17,6 +17,6 @@
       # and security needs.
       ansible.builtin.systemd:
         name: firewalld
-        enabled: no
+        enabled: false
         state: stopped
       become: true
molecule/ipv6/host_vars/control2.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
+---
+node_ipv4: 192.168.123.12
+node_ipv6: fdad:bad:ba55::de:12
@@ -4,7 +4,6 @@ dependency:
 driver:
   name: vagrant
 platforms:
-
   - name: control1
     box: generic/ubuntu2204
     memory: 2048
@@ -21,6 +20,22 @@ platforms:
         ssh.username: "vagrant"
         ssh.password: "vagrant"
 
+  - name: control2
+    box: generic/ubuntu2204
+    memory: 2048
+    cpus: 2
+    groups:
+      - k3s_cluster
+      - master
+    interfaces:
+      - network_name: private_network
+        ip: fdad:bad:ba55::de:12
+    config_options:
+      # We currently can not use public-key based authentication on Ubuntu 22.04,
+      # see: https://github.com/chef/bento/issues/1405
+      ssh.username: "vagrant"
+      ssh.password: "vagrant"
+
   - name: node1
     box: generic/ubuntu2204
     memory: 2048
||||
@@ -4,8 +4,14 @@
|
||||
tasks:
|
||||
- name: Override host variables (1/2)
|
||||
ansible.builtin.set_fact:
|
||||
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
|
||||
flannel_iface: eth1
|
||||
# See:
|
||||
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
|
||||
container_iface: eth1
|
||||
|
||||
# In this scenario, we have multiple interfaces that the VIP could be
|
||||
# broadcasted on. Since we have assigned a dedicated private network
|
||||
# here, let's make sure that it is used.
|
||||
kube_vip_iface: eth1
|
||||
|
||||
# The test VMs might be a bit slow, so we give them more time to join the cluster:
|
||||
retry_count: 45
|
||||
@@ -21,13 +27,13 @@
|
||||
- fdad:bad:ba55::1b:0/112
|
||||
- 192.168.123.80-192.168.123.90
|
||||
|
||||
# k3s_node_ip is by default set to the IPv4 address of flannel_iface.
|
||||
# k3s_node_ip is by default set to the IPv4 address of container_iface.
|
||||
# We want IPv6 addresses here of course, so we just specify them
|
||||
# manually below.
|
||||
k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}"
|
||||
|
||||
- name: Override host variables (2/2)
|
||||
# Since "extra_args" depends on "k3s_node_ip" and "flannel_iface" we have
|
||||
# Since "extra_args" depends on "k3s_node_ip" and "container_iface" we have
|
||||
# to set this AFTER overriding the both of them.
|
||||
ansible.builtin.set_fact:
|
||||
# A few extra server args are necessary:
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
name: net.ipv6.conf.{{ item }}.accept_dad
|
||||
value: "0"
|
||||
with_items:
|
||||
- "{{ flannel_iface }}"
|
||||
- "{{ container_iface }}"
|
||||
|
||||
- name: Write IPv4 configuration
|
||||
ansible.builtin.template:
|
||||
|
||||
@@ -3,6 +3,6 @@ network:
   version: 2
   renderer: networkd
   ethernets:
-    {{ flannel_iface }}:
+    {{ container_iface }}:
       addresses:
         - {{ node_ipv4 }}/24
@@ -2,4 +2,4 @@
 - name: Verify
   hosts: all
   roles:
-    - verify/from_outside
+    - verify_from_outside
@@ -6,4 +6,4 @@ outside_host: localhost
 testing_namespace: molecule-verify-from-outside
 
 # The directory in which the example manifests reside
-example_manifests_path: ../../../../example
+example_manifests_path: ../../../example
@@ -34,14 +34,14 @@
 
 - name: Assert that the nginx welcome page is available
   ansible.builtin.uri:
-    url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
-    return_content: yes
+    url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
+    return_content: true
   register: result
   failed_when: "'Welcome to nginx!' not in result.content"
   vars:
     ip: >-
       {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
-    port: >-
+    port_: >-
       {{ nginx_services.resources[0].spec.ports[0].port }}
   # Deactivated linter rules:
   #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
@@ -4,8 +4,9 @@
   tasks:
     - name: Override host variables
      ansible.builtin.set_fact:
-        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant  # noqa yaml[line-length]
-        flannel_iface: eth1
+        # See:
+        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
+        container_iface: eth1
 
         # The test VMs might be a bit slow, so we give them more time to join the cluster:
         retry_count: 45
reboot.sh (Normal file → Executable file, 2 changes)
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-ansible-playbook reboot.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook reboot.yml
@@ -1,9 +1,9 @@
 ---
-- hosts: k3s_cluster
-  gather_facts: yes
-  become: yes
+- name: Reboot k3s_cluster
+  hosts: k3s_cluster
+  gather_facts: true
   tasks:
     - name: Reboot the nodes (and Wait upto 5 mins max)
+      become: true
       reboot:
         reboot_timeout: 300
@@ -1,5 +1,4 @@
 ansible-core>=2.13.5
-ansible-lint>=6.8.6
 jmespath>=1.0.1
 jsonpatch>=1.32
 kubernetes>=25.3.0
@@ -9,4 +8,3 @@ netaddr>=0.8.0
 pre-commit>=2.20.0
 pre-commit-hooks>=1.3.1
 pyyaml>=6.0
-yamllint>=1.28.0
@@ -1,29 +1,21 @@
 #
-# This file is autogenerated by pip-compile with python 3.8
-# To update, run:
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
 #
 #    pip-compile requirements.in
 #
-ansible-compat==2.2.4
-    # via
-    #   ansible-lint
-    #   molecule
-ansible-core==2.14.1
+ansible-compat==3.0.1
+    # via molecule
+ansible-core==2.15.4
     # via
     #   -r requirements.in
-    #   ansible-lint
-ansible-lint==6.8.6
-    # via -r requirements.in
+    #   ansible-compat
 arrow==1.2.3
     # via jinja2-time
 attrs==22.1.0
     # via jsonschema
 binaryornot==0.4.4
     # via cookiecutter
-black==22.10.0
-    # via ansible-lint
-bracex==2.3.post1
-    # via wcmatch
 cachetools==5.2.0
     # via google-auth
 certifi==2022.9.24
@@ -40,7 +32,6 @@ charset-normalizer==2.1.1
     # via requests
 click==8.1.3
     # via
-    #   black
     #   click-help-colors
     #   cookiecutter
     #   molecule
@@ -59,9 +50,7 @@ distro==1.8.0
 enrich==1.2.7
     # via molecule
 filelock==3.8.0
-    # via
-    #   ansible-lint
-    #   virtualenv
+    # via virtualenv
 google-auth==2.14.0
     # via kubernetes
 identify==2.5.8
@@ -79,14 +68,13 @@ jinja2-time==0.2.0
     # via cookiecutter
 jmespath==1.0.1
     # via -r requirements.in
-jsonpatch==1.32
+jsonpatch==1.33
     # via -r requirements.in
 jsonpointer==2.3
     # via jsonpatch
 jsonschema==4.17.0
     # via
     #   ansible-compat
-    #   ansible-lint
     #   molecule
 kubernetes==25.3.0
     # via -r requirements.in
@@ -98,9 +86,7 @@ molecule==4.0.4
     #   molecule-vagrant
 molecule-vagrant==1.0.0
     # via -r requirements.in
-mypy-extensions==0.4.3
-    # via black
-netaddr==0.8.0
+netaddr==0.10.0
     # via -r requirements.in
 nodeenv==1.7.0
     # via pre-commit
@@ -110,21 +96,14 @@ packaging==21.3
     # via
     #   ansible-compat
     #   ansible-core
-    #   ansible-lint
     #   molecule
-pathspec==0.10.1
-    # via
-    #   black
-    #   yamllint
 platformdirs==2.5.2
-    # via
-    #   black
-    #   virtualenv
+    # via virtualenv
 pluggy==1.0.0
     # via molecule
 pre-commit==2.21.0
     # via -r requirements.in
-pre-commit-hooks==4.4.0
+pre-commit-hooks==4.5.0
     # via -r requirements.in
 pyasn1==0.4.8
     # via
@@ -148,18 +127,16 @@ python-slugify==6.1.2
     # via cookiecutter
 python-vagrant==1.0.0
     # via molecule-vagrant
-pyyaml==6.0
+pyyaml==6.0.1
     # via
     #   -r requirements.in
     #   ansible-compat
     #   ansible-core
-    #   ansible-lint
     #   cookiecutter
     #   kubernetes
     #   molecule
     #   molecule-vagrant
     #   pre-commit
     #   yamllint
 requests==2.28.1
     # via
     #   cookiecutter
@@ -171,15 +148,12 @@ resolvelib==0.8.1
     # via ansible-core
 rich==12.6.0
     # via
-    #   ansible-lint
     #   enrich
     #   molecule
 rsa==4.9
     # via google-auth
 ruamel-yaml==0.17.21
-    # via
-    #   ansible-lint
-    #   pre-commit-hooks
+    # via pre-commit-hooks
 selinux==0.2.1
     # via molecule-vagrant
 six==1.16.0
@@ -187,7 +161,7 @@ six==1.16.0
     #   google-auth
     #   kubernetes
     #   python-dateutil
-subprocess-tee==0.3.5
+subprocess-tee==0.4.1
     # via ansible-compat
 text-unidecode==1.3
     # via python-slugify
@@ -197,14 +171,8 @@ urllib3==1.26.12
     #   requests
 virtualenv==20.16.6
     # via pre-commit
-wcmatch==8.4.1
-    # via ansible-lint
 websocket-client==1.4.2
     # via kubernetes
-yamllint==1.29.0
-    # via
-    #   -r requirements.in
-    #   ansible-lint
 
 # The following packages are considered to be unsafe in a requirements file:
 # setuptools
reset.sh (2 changes)
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
+ansible-playbook reset.yml
reset.yml (19 changes)
@@ -1,13 +1,24 @@
 ---
 
-- hosts: k3s_cluster
-  gather_facts: yes
-  become: yes
+- name: Reset k3s cluster
+  hosts: k3s_cluster
+  gather_facts: true
   roles:
-    - role: reset
+    - role: reset
+      become: true
     - role: raspberrypi
+      become: true
       vars: {state: absent}
   post_tasks:
     - name: Reboot and wait for node to come back up
+      become: true
       reboot:
         reboot_timeout: 3600
+
+- name: Revert changes to Proxmox cluster
+  hosts: proxmox
+  gather_facts: true
+  become: true
+  remote_user: "{{ proxmox_lxc_ssh_user }}"
+  roles:
+    - role: reset_proxmox_lxc
+      when: proxmox_lxc_configure
@@ -1,12 +0,0 @@
----
-ansible_user: root
-server_init_args: >-
-  {% if groups['master'] | length > 1 %}
-    {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
-      --cluster-init
-    {% else %}
-      --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
-    {% endif %}
-    --token {{ k3s_token }}
-  {% endif %}
-  {{ extra_server_args | default('') }}
@@ -1,27 +0,0 @@
----
-- name: Create manifests directory on first master
-  file:
-    path: /var/lib/rancher/k3s/server/manifests
-    state: directory
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy metallb namespace to first master
-  template:
-    src: "metallb.namespace.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-namespace.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy metallb manifest to first master
-  template:
-    src: "metallb.crds.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
@@ -1,27 +0,0 @@
----
-- name: Create manifests directory on first master
-  file:
-    path: /var/lib/rancher/k3s/server/manifests
-    state: directory
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy vip rbac manifest to first master
-  template:
-    src: "vip.rbac.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
-
-- name: Copy vip manifest to first master
-  template:
-    src: "vip.yaml.j2"
-    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
-    owner: root
-    group: root
-    mode: 0644
-  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']
(File diff suppressed because it is too large.)
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: metallb-system
-  labels:
-    app: metallb
@@ -1,32 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-vip
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  annotations:
-    rbac.authorization.kubernetes.io/autoupdate: "true"
-  name: system:kube-vip-role
-rules:
-  - apiGroups: [""]
-    resources: ["services", "services/status", "nodes", "endpoints"]
-    verbs: ["list","get","watch", "update"]
-  - apiGroups: ["coordination.k8s.io"]
-    resources: ["leases"]
-    verbs: ["list", "get", "watch", "update", "create"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: system:kube-vip-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:kube-vip-role
-subjects:
-  - kind: ServiceAccount
-    name: kube-vip
-    namespace: kube-system
roles/k3s/node/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
+---
+# Name of the master group
+group_name_master: master
roles/k3s_agent/tasks/http_proxy.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
+---
+
+- name: Create k3s.service.d directory
+  file:
+    path: '{{ systemd_dir }}/k3s.service.d'
+    state: directory
+    owner: root
+    group: root
+    mode: '0755'
+
+
+- name: Copy K3s http_proxy conf file
+  template:
+    src: "http_proxy.conf.j2"
+    dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
+    owner: root
+    group: root
+    mode: '0755'
@@ -1,5 +1,9 @@
 ---
 
+- name: Deploy K3s http_proxy conf
+  include_tasks: http_proxy.yml
+  when: proxy_env is defined
+
 - name: Copy K3s service file
   template:
     src: "k3s.service.j2"
@@ -11,6 +15,6 @@
 - name: Enable and check K3s service
   systemd:
     name: k3s-node
-    daemon_reload: yes
+    daemon_reload: true
     state: restarted
-    enabled: yes
+    enabled: true
roles/k3s_agent/templates/http_proxy.conf.j2 (new file, 4 lines)
@@ -0,0 +1,4 @@
+[Service]
+Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
+Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
+Environment=NO_PROXY={{ proxy_env.NO_PROXY }}
@@ -7,7 +7,7 @@ After=network-online.target
 Type=notify
 ExecStartPre=-/sbin/modprobe br_netfilter
 ExecStartPre=-/sbin/modprobe overlay
-ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
+ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 --token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }}
 KillMode=process
 Delegate=yes
 # Having non-zero Limit*s causes performance problems due to accounting overhead
roles/k3s_custom_registries/defaults/main.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+# Indicates whether custom registries for k3s should be configured
+# Possible values:
+#   - present
+#   - absent
+state: present
roles/k3s_custom_registries/tasks/main.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
+---
+
+- name: Create directory /etc/rancher/k3s
+  file:
+    path: "/etc/{{ item }}"
+    state: directory
+    mode: '0755'
+  loop:
+    - rancher
+    - rancher/k3s
+
+- name: Insert registries into /etc/rancher/k3s/registries.yaml
+  blockinfile:
+    path: /etc/rancher/k3s/registries.yaml
+    block: "{{ custom_registries_yaml }}"
+    mode: '0600'
+    create: true
roles/k3s_server/defaults/main.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
+---
+# If you want to explicitly define an interface that ALL control nodes
+# should use to propagate the VIP, define it here. Otherwise, kube-vip
+# will determine the right interface automatically at runtime.
+kube_vip_iface: null
+
+# Name of the master group
+group_name_master: master
+
+# yamllint disable rule:line-length
+server_init_args: >-
+  {% if groups[group_name_master | default('master')] | length > 1 %}
+    {% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
+      --cluster-init
+    {% else %}
+      --server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
+    {% endif %}
+    --token {{ k3s_token }}
+  {% endif %}
+  {{ extra_server_args | default('') }}
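For orientation, a hedged sketch of how `server_init_args` above evaluates; the host names are hypothetical, while the flags themselves come straight from the template:

```yml
# Hypothetical three-host master group: [master-0, master-1, master-2].
# On master-0 (first host in the group) the template renders roughly:
#   --cluster-init --token <k3s_token> <extra_server_args>
# On master-1 / master-2 it renders roughly:
#   --server https://<master-0 node ip>:6443 --token <k3s_token> <extra_server_args>
# With a single master, only <extra_server_args> remains.
# The split(",") | first step keeps the first address of a dual-stack
# k3s_node_ip such as "192.168.123.11,fdad:bad:ba55::de:11" (see the ipv6
# molecule scenario), and ipwrap brackets it if it is IPv6.
```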
roles/k3s_server/tasks/http_proxy.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
+---
+
+- name: Create k3s.service.d directory
+  file:
+    path: '{{ systemd_dir }}/k3s.service.d'
+    state: directory
+    owner: root
+    group: root
+    mode: '0755'
+
+
+- name: Copy K3s http_proxy conf file
+  template:
+    src: "http_proxy.conf.j2"
+    dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
+    owner: root
+    group: root
+    mode: '0755'
@@ -1,23 +1,27 @@
 ---
 
-- name: Clean previous runs of k3s-init
+- name: Stop k3s-init
   systemd:
     name: k3s-init
     state: stopped
   failed_when: false
 
-- name: Clean previous runs of k3s-init
+- name: Clean previous runs of k3s-init  # noqa command-instead-of-module
+  # The systemd module does not support "reset-failed", so we need to resort to command.
   command: systemctl reset-failed k3s-init
   failed_when: false
   changed_when: false
-  args:
-    warn: false  # The ansible systemd module does not support reset-failed
+
+- name: Deploy K3s http_proxy conf
+  include_tasks: http_proxy.yml
+  when: proxy_env is defined
 
 - name: Deploy vip manifest
   include_tasks: vip.yml
 
 - name: Deploy metallb manifest
   include_tasks: metallb.yml
+  tags: metallb
 
 - name: Init cluster inside the transient k3s-init service
   command:
@@ -28,12 +32,13 @@
     creates: "{{ systemd_dir }}/k3s.service"
 
 - name: Verification
+  when: not ansible_check_mode
   block:
     - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
       command:
         cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
       register: nodes
-      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
+      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length)  # yamllint disable-line rule:line-length
      retries: "{{ retry_count | default(20) }}"
       delay: 10
       changed_when: false
@@ -49,7 +54,6 @@
         name: k3s-init
         state: stopped
       failed_when: false
-      when: not ansible_check_mode
 
 - name: Copy K3s service file
   register: k3s_service
@@ -63,9 +67,9 @@
 - name: Enable and check K3s service
   systemd:
     name: k3s
-    daemon_reload: yes
+    daemon_reload: true
     state: restarted
-    enabled: yes
+    enabled: true
 
 - name: Wait for node-token
   wait_for:
@@ -97,24 +101,24 @@
 
 - name: Create directory .kube
   file:
-    path: ~{{ ansible_user }}/.kube
+    path: "{{ ansible_user_dir }}/.kube"
     state: directory
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: "u=rwx,g=rx,o="
 
 - name: Copy config file to user home directory
   copy:
     src: /etc/rancher/k3s/k3s.yaml
-    dest: ~{{ ansible_user }}/.kube/config
-    remote_src: yes
-    owner: "{{ ansible_user }}"
+    dest: "{{ ansible_user_dir }}/.kube/config"
+    remote_src: true
+    owner: "{{ ansible_user_id }}"
     mode: "u=rw,g=,o="
 
 - name: Configure kubectl cluster to {{ endpoint_url }}
   command: >-
     k3s kubectl config set-cluster default
       --server={{ endpoint_url }}
-      --kubeconfig ~{{ ansible_user }}/.kube/config
+      --kubeconfig {{ ansible_user_dir }}/.kube/config
   changed_when: true
   vars:
     endpoint_url: >-
roles/k3s_server/tasks/metallb.yml (new file, 30 lines)
@@ -0,0 +1,30 @@
+---
+- name: Create manifests directory on first master
+  file:
+    path: /var/lib/rancher/k3s/server/manifests
+    state: directory
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
+  ansible.builtin.get_url:
+    url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml"  # noqa yaml[line-length]
+    dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
+  ansible.builtin.replace:
+    path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
+    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
+    replace: "{{ item.to }}"
+  with_items:
+    - change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
+      to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
+  loop_control:
+    label: "{{ item.change }} => {{ item.to }}"
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
roles/k3s_server/tasks/vip.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
+---
+- name: Create manifests directory on first master
+  file:
+    path: /var/lib/rancher/k3s/server/manifests
+    state: directory
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: Download vip rbac manifest to first master
+  ansible.builtin.get_url:
+    url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
+    dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+
+- name: Copy vip manifest to first master
+  template:
+    src: "vip.yaml.j2"
+    dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
+    owner: root
+    group: root
+    mode: 0644
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
roles/k3s_server/templates/http_proxy.conf.j2 (new file, 4 lines)
@@ -0,0 +1,4 @@
+[Service]
+Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
+Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
+Environment=NO_PROXY={{ proxy_env.NO_PROXY }}
@@ -30,8 +30,10 @@ spec:
           value: "true"
         - name: port
           value: "6443"
+{% if kube_vip_iface %}
         - name: vip_interface
-          value: {{ flannel_iface }}
+          value: {{ kube_vip_iface }}
+{% endif %}
         - name: vip_cidr
           value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
         - name: cp_enable
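The new `{% if kube_vip_iface %}` guard above only emits `vip_interface` when the variable is set, so kube-vip autodetects the interface by default. A small sketch of the override, mirroring what the ipv6 molecule scenario does in its host overrides:

```yml
# e.g. in group_vars or host_vars: pin the VIP broadcast interface
# instead of letting kube-vip autodetect one (default is kube_vip_iface: null)
kube_vip_iface: eth1
```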
@@ -1,3 +1,6 @@
 ---
 # Timeout to wait for MetalLB services to come up
 metal_lb_available_timeout: 120s
+
+# Name of the master group
+group_name_master: master
roles/k3s_server_post/tasks/calico.yml (new file, 99 lines)
@@ -0,0 +1,99 @@
+---
+- block:
+    - name: Create manifests directory on first master
+      file:
+        path: /tmp/k3s
+        state: directory
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
+      ansible.builtin.get_url:
+        url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
+        dest: "/tmp/k3s/tigera-operator.yaml"
+        owner: root
+        group: root
+        mode: 0755
+
+    - name: Copy Calico custom resources manifest to first master
+      ansible.builtin.template:
+        src: "calico.crs.j2"
+        dest: /tmp/k3s/custom-resources.yaml
+
+    - name: Deploy or replace Tigera Operator
+      block:
+        - name: Deploy Tigera Operator
+          ansible.builtin.command:
+            cmd: kubectl create -f /tmp/k3s/tigera-operator.yaml
+          register: create_operator
+          changed_when: "'created' in create_operator.stdout"
+          failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
+      rescue:
+        - name: Replace existing Tigera Operator
+          ansible.builtin.command:
+            cmd: kubectl replace -f /tmp/k3s/tigera-operator.yaml
+          register: replace_operator
+          changed_when: "'replaced' in replace_operator.stdout"
+          failed_when: "'Error' in replace_operator.stderr"
+
+    - name: Wait for Tigera Operator resources
+      command: >-
+        k3s kubectl wait {{ item.type }}/{{ item.name }}
+        --namespace='tigera-operator'
+        --for=condition=Available=True
+        --timeout=7s
+      register: tigera_result
+      changed_when: false
+      until: tigera_result is succeeded
+      retries: 7
+      delay: 7
+      with_items:
+        - { name: tigera-operator, type: deployment }
+      loop_control:
+        label: "{{ item.type }}/{{ item.name }}"
+
+    - name: Deploy Calico custom resources
+      block:
+        - name: Deploy custom resources for Calico
+          ansible.builtin.command:
+            cmd: kubectl create -f /tmp/k3s/custom-resources.yaml
+          register: create_cr
+          changed_when: "'created' in create_cr.stdout"
+          failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
+      rescue:
+        - name: Apply new Calico custom resource manifest
+          ansible.builtin.command:
+            cmd: kubectl apply -f /tmp/k3s/custom-resources.yaml
+          register: apply_cr
+          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
+          failed_when: "'Error' in apply_cr.stderr"
+
+    - name: Wait for Calico system resources to be available
+      command: >-
+        {% if item.type == 'daemonset' %}
+        k3s kubectl wait pods
+        --namespace='{{ item.namespace }}'
+        --selector={{ item.selector }}
+        --for=condition=Ready
+        {% else %}
+        k3s kubectl wait {{ item.type }}/{{ item.name }}
+        --namespace='{{ item.namespace }}'
+        --for=condition=Available
+        {% endif %}
+        --timeout=7s
+      register: cr_result
+      changed_when: false
+      until: cr_result is succeeded
+      retries: 30
+      delay: 7
+      with_items:
+        - { name: calico-typha, type: deployment, namespace: calico-system }
+        - { name: calico-kube-controllers, type: deployment, namespace: calico-system }
+        - { name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system }
+        - { name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system }
+        - { name: calico-apiserver, type: deployment, selector: 'k8s-app=calico-apiserver', namespace: calico-apiserver }
+      loop_control:
+        label: "{{ item.type }}/{{ item.name }}"
+  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
+  run_once: true  # stops "skipped" log spam
@@ -1,6 +1,12 @@
 ---
+- name: Deploy calico
+  include_tasks: calico.yml
+  tags: calico
+  when: use_calico == true
+
 - name: Deploy metallb pool
   include_tasks: metallb.yml
+  tags: metallb
 
 - name: Remove tmp directory used for manifests
   file:
@@ -3,25 +3,25 @@
   file:
     path: /tmp/k3s
     state: directory
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: 0755
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Copy metallb CRs manifest to first master
   template:
     src: "metallb.crs.j2"
     dest: "/tmp/k3s/metallb-crs.yaml"
-    owner: "{{ ansible_user }}"
+    owner: "{{ ansible_user_id }}"
     mode: 0755
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Test metallb-system namespace
   command: >-
     k3s kubectl -n metallb-system
   changed_when: false
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Wait for MetalLB resources
@@ -66,7 +66,7 @@
   command: >-
     k3s kubectl -n metallb-system get endpoints webhook-service
   changed_when: false
-  with_items: "{{ groups['master'] }}"
+  with_items: "{{ groups[group_name_master | default('master')] }}"
   run_once: true
 
 - name: Apply metallb CRs
@@ -79,11 +79,23 @@
   until: this.rc == 0
   retries: 5
 
-- name: Test metallb-system resources
+- name: Test metallb-system resources for Layer 2 configuration
   command: >-
     k3s kubectl -n metallb-system get {{ item }}
   changed_when: false
   run_once: true
+  when: metal_lb_mode == "layer2"
   with_items:
     - IPAddressPool
     - L2Advertisement
+
+- name: Test metallb-system resources for BGP configuration
+  command: >-
+    k3s kubectl -n metallb-system get {{ item }}
+  changed_when: false
+  run_once: true
+  when: metal_lb_mode == "bgp"
+  with_items:
+    - IPAddressPool
+    - BGPPeer
+    - BGPAdvertisement
roles/k3s_server_post/templates/calico.crs.j2 (new file, 28 lines)
@@ -0,0 +1,28 @@
+# This section includes base Calico installation configuration.
+# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+  name: default
+spec:
+  # Configures Calico networking.
+  calicoNetwork:
+    # Note: The ipPools section cannot be modified post-install.
+    ipPools:
+      - blockSize: {{ calico_blockSize if calico_blockSize is defined else '26' }}
+        cidr: {{ calico_cidr if calico_cidr is defined else '10.52.0.0/16' }}
+        encapsulation: {{ calico_encapsulation if calico_encapsulation is defined else 'VXLANCrossSubnet' }}
+        natOutgoing: {{ calico_natOutgoing if calico_natOutgoing is defined else 'Enabled' }}
+        nodeSelector: {{ calico_nodeSelector if calico_nodeSelector is defined else 'all()' }}
+    nodeAddressAutodetectionV4:
+      interface: {{ container_iface if container_iface is defined else 'eth0' }}
+
+---
+
+# This section configures the Calico API server.
+# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}
@@ -13,9 +13,31 @@ spec:
 {% for range in metal_lb_ip_range %}
   - {{ range }}
 {% endfor %}
+
+{% if metal_lb_mode == "layer2" %}
 ---
 apiVersion: metallb.io/v1beta1
 kind: L2Advertisement
 metadata:
   name: default
   namespace: metallb-system
+{% endif %}
+{% if metal_lb_mode == "bgp" %}
+---
+apiVersion: metallb.io/v1beta2
+kind: BGPPeer
+metadata:
+  name: default
+  namespace: metallb-system
+spec:
+  myASN: {{ metal_lb_bgp_my_asn }}
+  peerASN: {{ metal_lb_bgp_peer_asn }}
+  peerAddress: {{ metal_lb_bgp_peer_address }}
+
+---
+apiVersion: metallb.io/v1beta1
+kind: BGPAdvertisement
+metadata:
+  name: default
+  namespace: metallb-system
+{% endif %}
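With the template now branching on metal_lb_mode, BGP mode needs three peer variables that Layer 2 mode does not. A sketch of group_vars that would drive the BGP branch — the ASNs, address range, and router IP are placeholder values, not recommendations:

# Hypothetical group_vars for the BGP branch of the template above.
metal_lb_mode: bgp
metal_lb_ip_range:
  - 192.168.30.80-192.168.30.90
metal_lb_bgp_my_asn: 64513
metal_lb_bgp_peer_asn: 64512
metal_lb_bgp_peer_address: 192.168.30.1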
5 roles/lxc/handlers/main.yml Normal file
@@ -0,0 +1,5 @@
+---
+- name: Reboot server
+  become: true
+  reboot:
+  listen: reboot server
21 roles/lxc/tasks/main.yml Normal file
@@ -0,0 +1,21 @@
+---
+- name: Check for rc.local file
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Create rc.local if needed
+  lineinfile:
+    path: /etc/rc.local
+    line: "#!/bin/sh -e"
+    create: true
+    insertbefore: BOF
+    mode: "u=rwx,g=rx,o=rx"
+  when: not rcfile.stat.exists
+
+- name: Write rc.local file
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    state: present
+  notify: reboot server
4 roles/prereq/defaults/main.yml Normal file
@@ -0,0 +1,4 @@
+---
+secure_path:
+  RedHat: '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
+  Suse: '/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin'
@@ -1,34 +1,37 @@
 ---
 - name: Set same timezone on every Server
-  timezone:
+  community.general.timezone:
     name: "{{ system_timezone }}"
   when: (system_timezone is defined) and (system_timezone != "Your/Timezone")

 - name: Set SELinux to disabled state
-  selinux:
+  ansible.posix.selinux:
     state: disabled
   when: ansible_os_family == "RedHat"

 - name: Enable IPv4 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_forward
     value: "1"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl

 - name: Enable IPv6 forwarding
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv6.conf.all.forwarding
     value: "1"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl

 - name: Enable IPv6 router advertisements
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv6.conf.all.accept_ra
     value: "2"
     state: present
-    reload: yes
+    reload: true
+  tags: sysctl

 - name: Add br_netfilter to /etc/modules-load.d/
   copy:
@@ -38,28 +41,29 @@
   when: ansible_os_family == "RedHat"

 - name: Load br_netfilter
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   when: ansible_os_family == "RedHat"

 - name: Set bridge-nf-call-iptables (just to be sure)
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     value: "1"
     state: present
-    reload: yes
+    reload: true
   when: ansible_os_family == "RedHat"
   loop:
     - net.bridge.bridge-nf-call-iptables
     - net.bridge.bridge-nf-call-ip6tables
+  tags: sysctl

 - name: Add /usr/local/bin to sudo secure_path
   lineinfile:
-    line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
+    line: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'
     regexp: "Defaults(\\s)*secure_path(\\s)*="
     state: present
     insertafter: EOF
     path: /etc/sudoers
     validate: 'visudo -cf %s'
-  when: ansible_os_family == "RedHat"
+  when: ansible_os_family in [ "RedHat", "Suse" ]
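The sudoers line is now built from the secure_path map in roles/prereq/defaults/main.yml, keyed on ansible_os_family, which is what lets the task extend to SUSE hosts. A hypothetical one-off task (not part of the diff) to preview the exact line that will be written:

# Hypothetical check: renders the line the lineinfile task above
# will place in /etc/sudoers for this host's OS family.
- name: Show the secure_path sudoers line for this host
  debug:
    msg: "Defaults secure_path = {{ secure_path[ansible_os_family] }}"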
13 roles/proxmox_lxc/handlers/main.yml Normal file
@@ -0,0 +1,13 @@
+---
+- name: Reboot containers
+  block:
+    - name: Get container ids from filtered files
+      set_fact:
+        proxmox_lxc_filtered_ids: >-
+          {{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
+      listen: reboot containers
+    - name: Reboot container
+      command: "pct reboot {{ item }}"
+      loop: "{{ proxmox_lxc_filtered_ids }}"
+      changed_when: true
+      listen: reboot containers
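The filter chain derives each container id from its config path: split on "/", keep the filename, split on ".", keep the stem. A hypothetical debug task tracing the same pipeline on literal paths, so the transformation is visible:

# Hypothetical trace of the id-extraction pipeline:
# "/etc/pve/lxc/101.conf" -> "101.conf" -> "101"
- name: Show derived container ids
  debug:
    msg: >-
      {{ ["/etc/pve/lxc/101.conf", "/etc/pve/lxc/102.conf"]
         | map("split", "/") | map("last")
         | map("split", ".") | map("first") | list }}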
44 roles/proxmox_lxc/tasks/main.yml Normal file
@@ -0,0 +1,44 @@
+---
+- name: Check for container files that exist on this host
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
+  loop: "{{ proxmox_lxc_ct_ids }}"
+  register: stat_results
+
+- name: Filter out files that do not exist
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
+
+# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
+- name: Ensure lxc config has the right apparmor profile
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.apparmor.profile"
+    line: "lxc.apparmor.profile: unconfined"
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right cgroup
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cgroup.devices.allow"
+    line: "lxc.cgroup.devices.allow: a"
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right cap drop
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cap.drop"
+    line: "lxc.cap.drop: "
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Ensure lxc config has the right mounts
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.mount.auto"
+    line: 'lxc.mount.auto: "proc:rw sys:rw"'
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
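Because the first two tasks drop any id whose config file is absent on the current Proxmox host, the list can safely name every container in the cluster. A hypothetical group_vars entry wiring the role up — the ids are placeholders:

# Hypothetical group_vars: enable LXC preparation and list the
# container ids whose /etc/pve/lxc/<id>.conf should be patched.
proxmox_lxc_configure: true
proxmox_lxc_ct_ids:
  - 200
  - 201
  - 202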
@@ -1,3 +1,4 @@
 ---
-- name: reboot
+- name: Reboot
   reboot:
+  listen: reboot
@@ -47,20 +47,16 @@
     - raspberry_pi|default(false)
     - ansible_facts.lsb.description|default("") is match("Debian.*bullseye")

-- name: execute OS related tasks on the Raspberry Pi - {{ action }}
+- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
   include_tasks: "{{ item }}"
   with_first_found:
-    - "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
-    - "{{ action }}/{{ detected_distribution }}.yml"
-    - "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
-    - "{{ action }}/{{ ansible_distribution }}.yml"
-    - "{{ action }}/default.yml"
+    - "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
+    - "{{ action_ }}/{{ detected_distribution }}.yml"
+    - "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
+    - "{{ action_ }}/{{ ansible_distribution }}.yml"
+    - "{{ action_ }}/default.yml"
   vars:
-    action: >-
-      {% if state == "present" -%}
-        setup
-      {%- else -%}
-        teardown
-      {%- endif %}
+    action_: >-
+      {% if state == "present" %}setup{% else %}teardown{% endif %}
   when:
     - raspberry_pi|default(false)
@@ -8,20 +8,22 @@
   notify: reboot

 - name: Install iptables
-  apt: name=iptables state=present
+  apt:
+    name: iptables
+    state: present

 - name: Flush iptables before changing to iptables-legacy
   iptables:
     flush: true

 - name: Changing to iptables-legacy
-  alternatives:
+  community.general.alternatives:
     path: /usr/sbin/iptables-legacy
     name: iptables
   register: ip4_legacy

 - name: Changing to ip6tables-legacy
-  alternatives:
+  community.general.alternatives:
     path: /usr/sbin/ip6tables-legacy
     name: ip6tables
   register: ip6_legacy
@@ -2,7 +2,7 @@
 - name: Enable cgroup via boot commandline if not already enabled for Rocky
   lineinfile:
     path: /boot/cmdline.txt
-    backrefs: yes
+    backrefs: true
     regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
     line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
   notify: reboot
@@ -2,7 +2,7 @@
 - name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi
   lineinfile:
     path: /boot/firmware/cmdline.txt
-    backrefs: yes
+    backrefs: true
     regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
     line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
   notify: reboot
@@ -3,7 +3,7 @@
   systemd:
     name: "{{ item }}"
     state: stopped
-    enabled: no
+    enabled: false
   failed_when: false
   with_items:
     - k3s
@@ -46,11 +46,48 @@
     - /var/lib/rancher/
     - /var/lib/cni/

+- name: Remove K3s http_proxy files
+  file:
+    name: "{{ item }}"
+    state: absent
+  with_items:
+    - "{{ systemd_dir }}/k3s.service.d"
+    - "{{ systemd_dir }}/k3s-node.service.d"
+  when: proxy_env is defined
+
 - name: Reload daemon_reload
   systemd:
-    daemon_reload: yes
+    daemon_reload: true

 - name: Remove tmp directory used for manifests
   file:
     path: /tmp/k3s
     state: absent
+
+- name: Check if rc.local exists
+  stat:
+    path: /etc/rc.local
+  register: rcfile
+
+- name: Remove rc.local modifications for proxmox lxc containers
+  become: true
+  blockinfile:
+    path: /etc/rc.local
+    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
+    create: false
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Check rc.local for cleanup
+  become: true
+  slurp:
+    src: /etc/rc.local
+  register: rcslurp
+  when: proxmox_lxc_configure and rcfile.stat.exists
+
+- name: Cleanup rc.local if we only have a Shebang line
+  become: true
+  file:
+    path: /etc/rc.local
+    state: absent
+  when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1
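The final condition is the densest part of this hunk: slurp returns the file base64-encoded, so the expression decodes it, splits it into lines, and removes /etc/rc.local only when at most the shebang line is left after the managed block was stripped. A hypothetical trace task (not part of the diff), assuming rcslurp was registered as above:

# Hypothetical trace: a file containing only "#!/bin/sh -e" decodes
# to a single line, so the "length <= 1" cleanup condition holds.
- name: Show how many lines are left in rc.local
  debug:
    msg: "{{ (rcslurp.content | b64decode).splitlines() | length }}"
  when: rcslurp is not skipped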
@@ -9,7 +9,7 @@
   check_mode: false

 - name: Umount filesystem
-  mount:
+  ansible.posix.mount:
     path: "{{ item }}"
     state: unmounted
   with_items:
1 roles/reset_proxmox_lxc/handlers/main.yml Symbolic link
@@ -0,0 +1 @@
+../../proxmox_lxc/handlers/main.yml
47 roles/reset_proxmox_lxc/tasks/main.yml Normal file
@@ -0,0 +1,47 @@
+---
+- name: Check for container files that exist on this host
+  stat:
+    path: "/etc/pve/lxc/{{ item }}.conf"
+  loop: "{{ proxmox_lxc_ct_ids }}"
+  register: stat_results
+
+- name: Filter out files that do not exist
+  set_fact:
+    proxmox_lxc_filtered_files:
+      '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
+
+- name: Remove LXC apparmor profile
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.apparmor.profile"
+    line: "lxc.apparmor.profile: unconfined"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cgroups
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cgroup.devices.allow"
+    line: "lxc.cgroup.devices.allow: a"
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc cap drop
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.cap.drop"
+    line: "lxc.cap.drop: "
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
+
+- name: Remove lxc mounts
+  lineinfile:
+    dest: "{{ item }}"
+    regexp: "^lxc.mount.auto"
+    line: 'lxc.mount.auto: "proc:rw sys:rw"'
+    state: absent
+  loop: "{{ proxmox_lxc_filtered_files }}"
+  notify: reboot containers
50 site.yml
@@ -1,24 +1,48 @@
 ---

-- hosts: k3s_cluster
-  gather_facts: yes
-  become: yes
+- name: Prepare Proxmox cluster
+  hosts: proxmox
+  gather_facts: true
+  become: true
   environment: "{{ proxy_env | default({}) }}"
   roles:
+    - role: proxmox_lxc
+      when: proxmox_lxc_configure
+
+- name: Prepare k3s nodes
+  hosts: k3s_cluster
+  gather_facts: true
+  environment: "{{ proxy_env | default({}) }}"
+  roles:
+    - role: lxc
+      become: true
+      when: proxmox_lxc_configure
     - role: prereq
+      become: true
     - role: download
+      become: true
     - role: raspberrypi
+      become: true
     - role: k3s_custom_registries
+      become: true
       when: custom_registries

-- hosts: master
-  become: yes
+- name: Setup k3s servers
+  hosts: master
   environment: "{{ proxy_env | default({}) }}"
   roles:
-    - role: k3s/master
+    - role: k3s_server
+      become: true

-- hosts: node
-  become: yes
+- name: Setup k3s agents
+  hosts: node
   environment: "{{ proxy_env | default({}) }}"
   roles:
-    - role: k3s/node
+    - role: k3s_agent
+      become: true

-- hosts: master
-  become: yes
+- name: Configure k3s cluster
+  hosts: master
   environment: "{{ proxy_env | default({}) }}"
   roles:
-    - role: k3s/post
+    - role: k3s_server_post
+      become: true
8 templates/rc.local.j2 Normal file
@@ -0,0 +1,8 @@
+# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead
+# see: https://github.com/kubernetes-sigs/kind/issues/662
+if [ ! -e /dev/kmsg ]; then
+    ln -s /dev/console /dev/kmsg
+fi
+
+# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c
+mount --make-rshared /