initial commit

Author: Timothy Stewart
Date: 2022-03-27 10:05:37 -05:00
Commit: f32371d595
38 changed files with 1551 additions and 0 deletions


@@ -0,0 +1,12 @@
---
ansible_user: root
server_init_args: >-
{% if groups['master'] | length > 1 %}
{% if ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) %}
--cluster-init
{% else %}
--server https://{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}:6443
{% endif %}
--token {{ k3s_token }}
{% endif %}
{{ extra_server_args | default('') }}
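For reference, a sketch of what this template produces, using placeholders for the inventory values (nothing here is defined by this commit):

# With more than one host in groups['master']:
#   first master:      --cluster-init --token <k3s_token>
#   remaining masters: --server https://<first-master-address>:6443 --token <k3s_token>
# {{ extra_server_args }} is appended in every case; with a single master the
# whole value collapses to just the extra server args.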


@@ -0,0 +1,172 @@
---
- name: Clean previous runs of k3s-init
systemd:
name: k3s-init
state: stopped
failed_when: false
- name: Clean previous runs of k3s-init
command: systemctl reset-failed k3s-init
failed_when: false
changed_when: false
args:
warn: false # The ansible systemd module does not support reset-failed
- name: Create manifests directory on first master
file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
    mode: 0755  # directories need the execute bit to be traversable
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Copy vip rbac manifest to first master
template:
src: "vip.rbac.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.rbac.yaml"
owner: root
group: root
mode: 0644
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Copy vip manifest to first master
template:
src: "vip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
owner: root
group: root
mode: 0644
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Copy metallb namespace manifest to first master
template:
src: "metallb.namespace.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb.namespace.yaml"
owner: root
group: root
mode: 0644
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Copy metallb ConfigMap manifest to first master
template:
src: "metallb.configmap.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb.configmap.yaml"
owner: root
group: root
mode: 0644
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Copy metallb main manifest to first master
template:
src: "metallb.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/metallb.yaml"
owner: root
group: root
mode: 0644
when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
- name: Init cluster inside the transient k3s-init service
command:
cmd: "systemd-run -p RestartSec=2 \
-p Restart=on-failure \
--unit=k3s-init \
k3s server {{ server_init_args }}"
creates: "{{ systemd_dir }}/k3s.service"
args:
warn: false # The ansible systemd module does not support transient units
- name: Verification
block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails)
command:
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
register: nodes
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length)
retries: 20
delay: 10
changed_when: false
always:
- name: Kill the temporary service used for initialization
systemd:
name: k3s-init
state: stopped
failed_when: false
- name: Copy K3s service file
register: k3s_service
template:
src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s.service"
owner: root
group: root
mode: 0644
- name: Enable and check K3s service
systemd:
name: k3s
daemon_reload: yes
state: restarted
enabled: yes
- name: Wait for node-token
wait_for:
path: "{{ k3s_server_location }}/server/node-token"
- name: Register node-token file access mode
stat:
path: "{{ k3s_server_location }}/server/node-token"
register: p
- name: Change node-token file access
file:
path: "{{ k3s_server_location }}/server/node-token"
mode: "g+rx,o+rx"
- name: Read node-token from master
slurp:
path: "{{ k3s_server_location }}/server/node-token"
register: node_token
- name: Store Master node-token
set_fact:
token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
- name: Restore node-token file access
file:
path: "{{ k3s_server_location }}/server/node-token"
mode: "{{ p.stat.mode }}"
- name: Create directory .kube
file:
path: ~{{ ansible_user }}/.kube
state: directory
owner: "{{ ansible_user }}"
mode: "u=rwx,g=rx,o="
- name: Copy config file to user home directory
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: ~{{ ansible_user }}/.kube/config
remote_src: yes
owner: "{{ ansible_user }}"
mode: "u=rw,g=,o="
- name: Configure kubectl cluster to https://{{ apiserver_endpoint }}:6443
command: >-
k3s kubectl config set-cluster default
--server=https://{{ apiserver_endpoint }}:6443
--kubeconfig ~{{ ansible_user }}/.kube/config
changed_when: true
- name: Create kubectl symlink
file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/kubectl
state: link
- name: Create crictl symlink
file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/crictl
state: link
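The tasks above, together with the templates that follow, consume a handful of inventory variables. A hypothetical group_vars sketch, where the variable names come from this commit but every value is only illustrative:

ansible_user: root
systemd_dir: /etc/systemd/system
k3s_server_location: /var/lib/rancher/k3s
k3s_token: "change-me"
apiserver_endpoint: "192.168.30.222"
flannel_iface: "eth0"
kube_vip_tag_version: "v0.4.2"
metal_lb_speaker_tag_version: "v0.12.1"
metal_lb_controller_tag_version: "v0.12.1"
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
extra_server_args: ""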


@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server --data-dir {{ k3s_server_location }} {{ extra_server_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target
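Assuming k3s_server_location is /var/lib/rancher/k3s and extra_server_args is empty (both assumptions, not fixed by this unit), the templated ExecStart line renders to:

ExecStart=/usr/local/bin/k3s server --data-dir /var/lib/rancher/k3s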


@@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- {{ metal_lb_ip_range }}
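The templated value feeds MetalLB's layer2 address pool, which accepts either an explicit range or CIDR notation. Illustrative values for metal_lb_ip_range (examples only, not part of this commit):

# metal_lb_ip_range: "192.168.30.80-192.168.30.90"
# metal_lb_ip_range: "192.168.30.64/27"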


@@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
labels:
app: metallb


@@ -0,0 +1,481 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: controller
spec:
allowPrivilegeEscalation: false
allowedCapabilities: []
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65535
min: 1
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: speaker
spec:
allowPrivilegeEscalation: false
allowedCapabilities:
- NET_RAW
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
hostIPC: false
hostNetwork: true
hostPID: false
hostPorts:
- max: 7472
min: 7472
- max: 7946
min: 7946
privileged: true
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:controller
rules:
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services/status
verbs:
- update
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- controller
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:speaker
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- speaker
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- pods
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- create
- apiGroups:
- ''
resources:
- secrets
resourceNames:
- memberlist
verbs:
- list
- apiGroups:
- apps
resources:
- deployments
resourceNames:
- controller
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pod-lister
subjects:
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: controller
subjects:
- kind: ServiceAccount
name: controller
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: metallb
component: speaker
name: speaker
namespace: metallb-system
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: speaker
spec:
containers:
- args:
- --port=7472
- --config=config
- --log-level=info
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: METALLB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: METALLB_ML_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
        # needed when other software is also using memberlist / port 7946;
        # if you change this default you also need to update the container ports
        # definition and the PodSecurityPolicy hostPorts definition
#- name: METALLB_ML_BIND_PORT
# value: "7946"
- name: METALLB_ML_LABELS
value: "app=metallb,component=speaker"
- name: METALLB_ML_SECRET_KEY
valueFrom:
secretKeyRef:
name: memberlist
key: secretkey
image: quay.io/metallb/speaker:{{ metal_lb_speaker_tag_version }}
name: speaker
ports:
- containerPort: 7472
name: monitoring
- containerPort: 7946
name: memberlist-tcp
- containerPort: 7946
name: memberlist-udp
protocol: UDP
livenessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_RAW
drop:
- ALL
readOnlyRootFilesystem: true
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: speaker
terminationGracePeriodSeconds: 2
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: metallb
component: controller
name: controller
namespace: metallb-system
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: controller
spec:
containers:
- args:
- --port=7472
- --config=config
- --log-level=info
env:
- name: METALLB_ML_SECRET_NAME
value: memberlist
- name: METALLB_DEPLOYMENT
value: controller
image: quay.io/metallb/controller:{{ metal_lb_controller_tag_version }}
name: controller
ports:
- containerPort: 7472
name: monitoring
livenessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
fsGroup: 65534
serviceAccountName: controller
terminationGracePeriodSeconds: 0
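Once the controller and speaker are running, any Service of type LoadBalancer gets an address from the pool defined in the ConfigMap above. A minimal test Service, hypothetical and not part of this commit:

apiVersion: v1
kind: Service
metadata:
  name: nginx-test
  namespace: default
spec:
  type: LoadBalancer
  selector:
    app: nginx-test
  ports:
  - name: http
    port: 80
    targetPort: 80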


@@ -0,0 +1,33 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-vip
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: system:kube-vip-role
rules:
- apiGroups: [""]
resources: ["services", "services/status", "nodes"]
verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-vip-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-vip-role
subjects:
- kind: ServiceAccount
name: kube-vip
namespace: kube-system


@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
name: kube-vip-ds
namespace: kube-system
spec:
selector:
matchLabels:
name: kube-vip-ds
template:
metadata:
creationTimestamp: null
labels:
name: kube-vip-ds
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_interface
value: {{ flannel_iface }}
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: svc_enable
value: "true"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: {{ apiserver_endpoint }}
image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }}
imagePullPolicy: Always
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_TIME
hostNetwork: true
serviceAccountName: kube-vip
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
updateStrategy: {}
status:
currentNumberScheduled: 0
desiredNumberScheduled: 0
numberMisscheduled: 0
numberReady: 0
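The two templated values decide where the control-plane VIP lives and which interface announces it. With illustrative values (assumptions, not part of this commit) the relevant env entries render as:

- name: vip_interface
  value: eth0
- name: address
  value: 192.168.30.222

Because vip_arp is "true", kube-vip announces that address via ARP on the chosen interface, and the earlier "Configure kubectl cluster" task points the kubeconfig at the same https://{{ apiserver_endpoint }}:6443 endpoint instead of at any single master.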