Mirror of https://github.com/techno-tim/k3s-ansible.git, synced 2025-12-28 19:52:40 +01:00

Compare commits: 0b94640930 ... v1.29.2+k3 (10 commits)
| SHA1 |
|---|
| a46d97a28d |
| dc9d571f17 |
| 6742551e5c |
| fb3478a086 |
| 518c5bb62a |
| 3f5d8dfe9f |
| efbfadcb93 |
| f81ec04ba2 |
| 8432d3bc66 |
| 14ae9df1bc |
.github/workflows/cache.yml (vendored), 4 lines changed

@@ -11,7 +11,7 @@ jobs:
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}

@@ -23,7 +23,7 @@ jobs:
       - name: Cache Vagrant boxes
         id: cache-vagrant
-        uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # 4.0
+        uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
         with:
           lookup-only: true #if it exists, we don't need to restore and can skip the next step
           path: |
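In cache.yml the pinned SHAs for actions/checkout and actions/cache are bumped. The `lookup-only: true` input makes the cache step only probe for an existing entry and expose the result through its `cache-hit` output instead of downloading the archive. A minimal sketch of how that output is typically consumed later in the same job; the download command and box name are placeholders, only the path and key mirror values that appear elsewhere in this compare:

```yaml
# Hypothetical follow-up steps (not part of this diff): populate and save the
# cache only when the lookup above reported a miss.
- name: Download Vagrant boxes
  if: steps.cache-vagrant.outputs.cache-hit != 'true'
  run: vagrant box add generic/ubuntu2204 --provider libvirt  # placeholder box/provider
- name: Save Vagrant boxes cache
  if: steps.cache-vagrant.outputs.cache-hit != 'true'
  uses: actions/cache/save@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
  with:
    path: ~/.vagrant.d/boxes
    key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
```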
.github/workflows/lint.yml (vendored), 6 lines changed

@@ -11,7 +11,7 @@ jobs:
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}

@@ -22,7 +22,7 @@ jobs:
           cache: 'pip' # caching pip dependencies

       - name: Restore Ansible cache
-        uses: actions/cache/restore@ab5e6d0c87105b4c9c2047343972218f562e4319 # 4.0
+        uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
         with:
           path: ~/.ansible/collections
           key: ansible-${{ hashFiles('collections/requirements.yml') }}

@@ -45,7 +45,7 @@ jobs:
     runs-on: self-hosted
     steps:
       - name: Checkout code
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
       - name: Ensure SHA pinned actions
         uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # 3.0.3
         with:
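lint.yml gets the same checkout and cache/restore bumps; the last hunk sits in the self-hosted job that runs zgosalvez/github-actions-ensure-sha-pinned-actions, which fails the workflow when any `uses:` reference points at a mutable tag or branch instead of a full-length commit SHA. Roughly, as an illustration (these lines are not from this repo):

```yaml
# The pinning check flags mutable references like this one:
- uses: actions/checkout@v4
# and accepts an immutable commit SHA, conventionally annotated with the
# human-readable version in a trailing comment:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
```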
.github/workflows/test.yml (vendored), 6 lines changed

@@ -10,7 +10,7 @@ jobs:
       matrix:
         scenario:
           - default
-          - ipv6
+          # - ipv6
           - single_node
           - calico
           - cilium

@@ -21,7 +21,7 @@ jobs:
     steps:
       - name: Check out the codebase
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 4.1.1
+        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 4.1.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}

@@ -65,7 +65,7 @@ jobs:
           cache: 'pip' # caching pip dependencies

       - name: Restore vagrant Boxes cache
-        uses: actions/cache/restore@ab5e6d0c87105b4c9c2047343972218f562e4319 # 4.0
+        uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # 4.0
         with:
           path: ~/.vagrant.d/boxes
           key: vagrant-boxes-${{ hashFiles('**/molecule.yml') }}
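test.yml drops the ipv6 entry from the Molecule scenario matrix by commenting it out, which removes that job variant without deleting the scenario directory itself. A hypothetical sketch of how such a matrix value is usually consumed further down the job (the step name and env var are assumptions, not lines from this workflow):

```yaml
# Each matrix entry typically maps to a molecule scenario of the same name.
- name: Run molecule
  run: molecule test --scenario-name ${{ matrix.scenario }}
  env:
    PY_COLORS: '1'  # assumed convenience setting for readable CI logs
```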
README.md, 16 lines changed

@@ -96,8 +96,22 @@ ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
 To copy your `kube config` locally so that you can access your **Kubernetes** cluster run:

 ```bash
-scp debian@master_ip:~/.kube/config ~/.kube/config
+scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
 ```
+If you get file Permission denied, go into the node and temporarly run:
+```bash
+sudo chmod 777 /etc/rancher/k3s/k3s.yaml
+```
+Then copy with the scp command and reset the permissions back to:
+```bash
+sudo chmod 600 /etc/rancher/k3s/k3s.yaml
+```
+
+You'll then want to modify the config to point to master IP by running:
+```bash
+sudo nano ~/.kube/config
+```
+Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`

 ### 🔨 Testing your cluster
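The new README text copies /etc/rancher/k3s/k3s.yaml by hand: loosen permissions, scp, tighten permissions, then edit the `server:` address. If Ansible is already pointed at the cluster, the same result can be scripted. A hypothetical convenience playbook (not part of this repo or this diff), assuming the inventory group is named `master` as in the project's sample inventory:

```yaml
---
# Sketch only: fetch the k3s kubeconfig from the first master and write it to
# the local workstation with the server address rewritten to that host.
- hosts: master[0]
  become: true
  tasks:
    - name: Read kubeconfig from the master
      ansible.builtin.slurp:
        src: /etc/rancher/k3s/k3s.yaml
      register: k3s_kubeconfig

    - name: Write it locally with the server address rewritten
      ansible.builtin.copy:
        content: "{{ k3s_kubeconfig.content | b64decode | replace('https://127.0.0.1:6443', 'https://' ~ (ansible_host | default(inventory_hostname)) ~ ':6443') }}"
        dest: "{{ lookup('env', 'HOME') }}/.kube/config"  # assumes ~/.kube already exists locally
        mode: "0600"
      delegate_to: localhost
      become: false
```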
@@ -1,5 +1,5 @@
 ---
-k3s_version: v1.29.0+k3s1
+k3s_version: v1.29.2+k3s1
 # this is the user that has ssh access to these machines
 ansible_user: ansibleuser
 systemd_dir: /etc/systemd/system

@@ -13,13 +13,13 @@ flannel_iface: "eth0"
 # uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
 # calico_iface: "eth0"
 calico_ebpf: false # use eBPF dataplane instead of iptables
-calico_tag: "v3.27.0" # calico version tag
+calico_tag: "v3.27.2" # calico version tag

 # uncomment cilium_iface to use cilium cni instead of flannel or calico
 # ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
 # cilium_iface: "eth0"
 cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
-cilium_tag: "v1.14.6" # cilium version tag
+cilium_tag: "v1.15.2" # cilium version tag
 cilium_hubble: true # enable hubble observability relay and ui

 # if using calico or cilium, you may specify the cluster pod cidr pool

@@ -72,7 +72,7 @@ extra_agent_args: >-
   {{ extra_args }}

 # image tag for kube-vip
-kube_vip_tag_version: "v0.6.4"
+kube_vip_tag_version: "v0.7.2"

 # tag for kube-vip-cloud-provider manifest
 # kube_vip_cloud_provider_tag_version: "main"

@@ -93,8 +93,8 @@ metal_lb_mode: "layer2"
 # metal_lb_bgp_peer_address: "192.168.30.1"

 # image tag for metal lb
-metal_lb_speaker_tag_version: "v0.13.12"
-metal_lb_controller_tag_version: "v0.13.12"
+metal_lb_speaker_tag_version: "v0.14.3"
+metal_lb_controller_tag_version: "v0.14.3"

 # metallb ip range for load balancer
 metal_lb_ip_range: "192.168.30.80-192.168.30.90"
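These hunks bump the k3s, Calico, Cilium, kube-vip, and MetalLB versions. Both MetalLB v0.13.12 and v0.14.3 use CRD-based configuration, so a range like `metal_lb_ip_range` above is ultimately expressed as custom resources rather than the legacy ConfigMap. A rough illustration, assuming the playbook renders something equivalent (resource names here are placeholders):

```yaml
# Hedged illustration, not a file in this diff: how a MetalLB v0.13+ address
# range and layer2 announcement are expressed as custom resources.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.30.80-192.168.30.90
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2-advert
  namespace: metallb-system
spec:
  ipAddressPools:
    - first-pool
```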
@@ -10,7 +10,7 @@

 - name: Download vip rbac manifest to first master
   ansible.builtin.get_url:
-    url: "https://raw.githubusercontent.com/kube-vip/kube-vip/{{ kube_vip_tag_version }}/docs/manifests/rbac.yaml"
+    url: "https://kube-vip.io/manifests/rbac.yaml"
     dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
     owner: root
     group: root
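k3s automatically applies any manifest dropped under /var/lib/rancher/k3s/server/manifests, so switching the RBAC source URL to kube-vip.io changes where the file comes from, not how it is deployed. A hypothetical follow-up check (not part of this diff), assuming the upstream manifest still creates a `kube-vip` ServiceAccount in `kube-system` as the kube-vip documentation describes:

```yaml
# Sketch only: confirm the auto-applied RBAC manifest landed in the cluster.
- name: Confirm the kube-vip service account exists
  ansible.builtin.command:
    cmd: k3s kubectl get serviceaccount kube-vip -n kube-system
  register: kube_vip_sa
  changed_when: false
```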
@@ -48,7 +48,7 @@
       k3s kubectl wait {{ item.type }}/{{ item.name }}
       --namespace='tigera-operator'
       --for=condition=Available=True
-      --timeout=7s
+      --timeout=30s
   register: tigera_result
   changed_when: false
   until: tigera_result is succeeded

@@ -87,7 +87,7 @@
       --namespace='{{ item.namespace }}'
       --for=condition=Available
       {% endif %}
-      --timeout=7s
+      --timeout=30s
   register: cr_result
   changed_when: false
   until: cr_result is succeeded

@@ -185,7 +185,7 @@
       --helm-set bpf.loadBalancer.mode={{ bpf_lb_mode | default("hybrid") }}
       {% endif %}
   environment:
-    KUBECONFIG: /home/{{ ansible_user }}/.kube/config
+    KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
   register: cilium_install_result
   changed_when: cilium_install_result.rc == 0
   when: cilium_installed.rc != 0 or cilium_needs_update

@@ -202,7 +202,7 @@
       --namespace=kube-system
       --for=condition=Available
       {% endif %}
-      --timeout=7s
+      --timeout=30s
   register: cr_result
   changed_when: false
   until: cr_result is succeeded
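The remaining hunks raise the per-attempt `--timeout` for `k3s kubectl wait` from 7s to 30s and swap the hard-coded `/home/{{ ansible_user }}` path for the `ansible_user_dir` fact, which also resolves correctly when the remote user's home is not under /home (root, for example). Each wait command sits inside an Ansible retry loop, so the effective wait window is roughly retries × (attempt timeout + delay). A minimal sketch of that pattern; the retries, delay, and loop item are assumptions, only the wait command and register/until/changed_when keywords appear in the diff above:

```yaml
# Sketch of the retry wrapper these hunks live inside (values assumed).
- name: Wait for the tigera-operator deployment to become Available
  ansible.builtin.command:
    cmd: >-
      k3s kubectl wait {{ item.type }}/{{ item.name }}
      --namespace='tigera-operator'
      --for=condition=Available=True
      --timeout=30s
  register: tigera_result
  changed_when: false
  until: tigera_result is succeeded
  retries: 10   # assumed retry count
  delay: 5      # assumed seconds between attempts
  loop:
    - { type: deployment, name: tigera-operator }
```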