Intermediate commit (migrating Harbor to separate VM)

Danny Bessems 2020-11-14 23:57:19 +01:00
parent 62779d47b2
commit 969b71211b
6 changed files with 71 additions and 471 deletions

README.md

@@ -15,7 +15,7 @@ kubectl config view --raw
```
On subsequent nodes:
```
curl -sfL https://get.k3s.io | K3S_TOKEN=<value from master> sh -s - --server https://<fqdn or ip>:6443 --no-deploy traefik
curl -sfL https://get.k3s.io | K3S_URL=https://<fqdn or ip>:6443 K3S_TOKEN=<value from master> sh -
```
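*(With `K3S_URL` set, the installer joins the node as an agent; the earlier `--server` form would have added it as another server.)*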
Install Rancher's [System Upgrade Controller](https://rancher.com/docs/k3s/latest/en/upgrades/automated/):
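Once the controller is running, upgrades are driven by `Plan` resources. A minimal sketch following the linked docs (the channel URL and node selector are taken from Rancher's example; adjust as needed):
```
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: server-plan
  namespace: system-upgrade
spec:
  concurrency: 1
  cordon: true
  nodeSelector:
    matchExpressions:
    - key: node-role.kubernetes.io/master
      operator: In
      values:
      - "true"
  serviceAccountName: system-upgrade
  upgrade:
    image: rancher/k3s-upgrade
  channel: https://update.k3s.io/v1-release/channels/stable
```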
@@ -71,8 +71,38 @@ Store credentials in `secret`:
kubectl create secret generic --type=mount/smb smb-secret --from-literal=username=<<omitted>> --from-literal=password=<<omitted>>
```
#### 1.3) *Optional* `storageClass` for block storage:
Install [Longhorn](https://code.spamasaurus.com/djpbessems/Kubernetes.K3s.installLog/src/branch/master/storage/Longhorn/README.md) for block storage with NFS-backed backup schedules.
#### 1.3) `storageClass` for distributed block storage:
See [Longhorn Helm Chart](https://longhorn.io/):
```
kubectl create namespace longhorn-system
helm repo add longhorn https://charts.longhorn.io
helm install longhorn longhorn/longhorn --namespace longhorn-system --values=storage/Longhorn/chart-values.yml
```
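Optionally, confirm that all components come up before continuing (every pod in `longhorn-system` should reach `Running`):
```
kubectl -n longhorn-system get pods --watch
```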
Expose Longhorn's dashboard through `IngressRoute`:
```
kubectl apply -f storage/Longhorn/ingressRoute-Longhorn.yml
```
Add an additional `storageClass` with a backup schedule:
***After** specifying an NFS backup target (syntax: `nfs://servername:/path/to/share`) through Longhorn's dashboard*
```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-dailybackup
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
  numberOfReplicas: "3"
  staleReplicaTimeout: "2880"
  fromBackup: ""
  recurringJobs: '[{"name":"backup", "task":"backup", "cron":"0 0 * * *", "retain":14}]'
```
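*The cron expression `0 0 * * *` schedules a backup every midnight, and `retain: 14` keeps two weeks' worth of daily backups.*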
Then make this the new default `storageClass`:
```
kubectl patch storageclass longhorn-dailybackup -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
kubectl delete storageclass longhorn
```
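With the default switched, any `persistentVolumeClaim` that omits `storageClassName` now gets a daily-backed-up Longhorn volume. A minimal sketch (claim name and size are placeholders):
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```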
### 2) Ingress Controller
##### 2.1) Create `configMap`, `secret` and `persistentVolumeClaim`
@@ -247,7 +277,10 @@ kubectl apply -f services/PVR/storageClass-PVR.yml
kubectl apply -f services/PVR/deploy-NZBHydra.yml
```
###### 4.9.2) [Plex](https://www.plex.tv/) <small>(media library)</small>
~~kubectl apply -f services/PVR/deploy-Plex.yml~~
*Due to its use of symlinks, Plex is partially incompatible with SMB-share-backed storage*
```
kubectl apply -f services/PVR/deploy-Plex.yml
```
###### 4.9.3) [Radarr](https://radarr.video/) <small>(movie management)</small>
```
kubectl apply -f services/PVR/deploy-Radarr.yml
@@ -284,4 +317,7 @@ kubectl apply -f services/TraefikCertsDumper/deploy-TraefikCertsDumper.yml
kubectl get $(kubectl api-resources --verbs=list -o name | paste -sd, -) --ignore-not-found --all-namespaces
* ...
* `DaemonSet` to configure nodes' **sysctl** `fs.inotify.max_user_watches`:
kubectl apply -f system/InotifyMaxWatchers/daemonSet-InotifyMaxWatchers.yml

storage/Longhorn/README.md (deleted)

@ -1,32 +0,0 @@
### Persistent Storage
Manifest for [Longhorn](https://github.com/longhorn/longhorn):
```
curl -Ls https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/longhorn.yaml -o storage/Longhorn/deploy-Longhorn.yaml
sed -e 's/LoadBalancer/ClusterIP/' -i storage/Longhorn/deploy-Longhorn.yaml
kubectl apply -f storage/Longhorn/deploy-Longhorn.yaml
```
##### `IngressRoute` for Longhorn's dashboard:
```
kubectl apply -f storage/Longhorn/ingressRoute-Longhorn.yaml
```
##### `storageClass` with backup schedule:
After specifying an NFS backup target (syntax: `nfs://servername:/path/to/share`) through Longhorn's dashboard, create a new `storageClass` with a backup schedule:
```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-dailybackup
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
  numberOfReplicas: "3"
  staleReplicaTimeout: "2880"
  fromBackup: ""
  recurringJobs: '[{"name":"backup", "task":"backup", "cron":"0 0 * * *", "retain":14}]'
```
Then make this the new default `storageClass`:
```
kubectl patch storageclass longhorn-dailybackup -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
kubectl delete storageclass longhorn
```

storage/Longhorn/chart-values.yml (new)

@@ -0,0 +1,2 @@
csi:
  kubeletRootDir: /var/lib/kubelet
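*Presumably set because k3s has not always used the kubelet default; the deleted manifest below pins `--kubelet-root-dir` to `/var/lib/rancher/k3s/agent/kubelet` for the same reason.*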

storage/Longhorn/deploy-Longhorn.yaml (deleted)

@ -1,431 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: longhorn-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: longhorn-service-account
  namespace: longhorn-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: longhorn-role
rules:
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - "*"
- apiGroups: [""]
  resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps"]
  verbs: ["*"]
- apiGroups: [""]
  resources: ["namespaces"]
  verbs: ["get", "list"]
- apiGroups: ["apps"]
  resources: ["daemonsets", "statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["batch"]
  resources: ["jobs", "cronjobs"]
  verbs: ["*"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses", "volumeattachments", "csinodes", "csidrivers"]
  verbs: ["*"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["longhorn.io"]
  resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
    "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status"]
  verbs: ["*"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["*"]
# to be removed after v0.7.0
- apiGroups: ["longhorn.rancher.io"]
  resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: longhorn-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: longhorn-role
subjects:
- kind: ServiceAccount
  name: longhorn-service-account
  namespace: longhorn-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Engine
  name: engines.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Engine
    listKind: EngineList
    plural: engines
    shortNames:
    - lhe
    singular: engine
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Replica
  name: replicas.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Replica
    listKind: ReplicaList
    plural: replicas
    shortNames:
    - lhr
    singular: replica
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Setting
  name: settings.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Setting
    listKind: SettingList
    plural: settings
    shortNames:
    - lhs
    singular: setting
  scope: Namespaced
  version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Volume
  name: volumes.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Volume
    listKind: VolumeList
    plural: volumes
    shortNames:
    - lhv
    singular: volume
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: EngineImage
  name: engineimages.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: EngineImage
    listKind: EngineImageList
    plural: engineimages
    shortNames:
    - lhei
    singular: engineimage
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Node
  name: nodes.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Node
    listKind: NodeList
    plural: nodes
    shortNames:
    - lhn
    singular: node
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: InstanceManager
  name: instancemanagers.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: InstanceManager
    listKind: InstanceManagerList
    plural: instancemanagers
    shortNames:
    - lhim
    singular: instancemanager
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: longhorn-default-setting
  namespace: longhorn-system
data:
  default-setting.yaml: |-
    backup-target:
    backup-target-credential-secret:
    create-default-disk-labeled-nodes:
    default-data-path:
    replica-soft-anti-affinity:
    storage-over-provisioning-percentage:
    storage-minimal-available-percentage:
    upgrade-checker:
    default-replica-count:
    guaranteed-engine-cpu:
    default-longhorn-static-storage-class:
    backupstore-poll-interval:
    taint-toleration:
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: longhorn-manager
  name: longhorn-manager
  namespace: longhorn-system
spec:
  selector:
    matchLabels:
      app: longhorn-manager
  template:
    metadata:
      labels:
        app: longhorn-manager
    spec:
      containers:
      - name: longhorn-manager
        image: longhornio/longhorn-manager:v0.7.0
        imagePullPolicy: Always
        securityContext:
          privileged: true
        command:
        - longhorn-manager
        - -d
        - daemon
        - --engine-image
        - longhornio/longhorn-engine:v0.7.0
        - --manager-image
        - longhornio/longhorn-manager:v0.7.0
        - --service-account
        - longhorn-service-account
        ports:
        - containerPort: 9500
        volumeMounts:
        - name: dev
          mountPath: /host/dev/
        - name: proc
          mountPath: /host/proc/
        - name: varrun
          mountPath: /var/run/
        - name: longhorn
          mountPath: /var/lib/rancher/longhorn/
          mountPropagation: Bidirectional
        - name: longhorn-default-setting
          mountPath: /var/lib/longhorn-setting/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # Should be: mount path of the volume longhorn-default-setting + the key of the configmap data in 04-default-setting.yaml
        - name: DEFAULT_SETTING_PATH
          value: /var/lib/longhorn-setting/default-setting.yaml
      volumes:
      - name: dev
        hostPath:
          path: /dev/
      - name: proc
        hostPath:
          path: /proc/
      - name: varrun
        hostPath:
          path: /var/run/
      - name: longhorn
        hostPath:
          path: /var/lib/rancher/longhorn/
      - name: longhorn-default-setting
        configMap:
          name: longhorn-default-setting
      serviceAccountName: longhorn-service-account
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: longhorn-manager
  name: longhorn-backend
  namespace: longhorn-system
spec:
  selector:
    app: longhorn-manager
  ports:
  - port: 9500
    targetPort: 9500
  sessionAffinity: ClientIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: longhorn-ui
  name: longhorn-ui
  namespace: longhorn-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: longhorn-ui
  template:
    metadata:
      labels:
        app: longhorn-ui
    spec:
      containers:
      - name: longhorn-ui
        image: longhornio/longhorn-ui:v0.7.0
        ports:
        - containerPort: 8000
        env:
        - name: LONGHORN_MANAGER_IP
          value: "http://longhorn-backend:9500"
      serviceAccountName: longhorn-service-account
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: longhorn-ui
  name: longhorn-frontend
  namespace: longhorn-system
spec:
  selector:
    app: longhorn-ui
  ports:
  - port: 80
    targetPort: 8000
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: longhorn-driver-deployer
  namespace: longhorn-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: longhorn-driver-deployer
  template:
    metadata:
      labels:
        app: longhorn-driver-deployer
    spec:
      initContainers:
      - name: wait-longhorn-manager
        image: longhornio/longhorn-manager:v0.7.0
        command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
      containers:
      - name: longhorn-driver-deployer
        image: longhornio/longhorn-manager:v0.7.0
        imagePullPolicy: Always
        command:
        - longhorn-manager
        - -d
        - deploy-driver
        - --manager-image
        - longhornio/longhorn-manager:v0.7.0
        - --manager-url
        - http://longhorn-backend:9500/v1
        # manually set root directory for csi
        #- --kubelet-root-dir
        #- /var/lib/rancher/k3s/agent/kubelet
        # manually specify number of CSI attacher replicas
        #- --csi-attacher-replica-count
        #- "3"
        # manually specify number of CSI provisioner replicas
        #- --csi-provisioner-replica-count
        #- "3"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: SERVICE_ACCOUNT
          valueFrom:
            fieldRef:
              fieldPath: spec.serviceAccountName
      serviceAccountName: longhorn-service-account
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn
provisioner: driver.longhorn.io
parameters:
  numberOfReplicas: "3"
  staleReplicaTimeout: "2880" # 48 hours in minutes
  fromBackup: ""
#  diskSelector: "ssd,fast"
#  nodeSelector: "storage,fast"
#  recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1},
#                   {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1,
#                   "labels": {"interval":"2m"}}]'
---

storage/Longhorn/ingressRoute-Longhorn.yml

@@ -7,7 +7,7 @@ spec:
  entryPoints:
  - websecure
  routes:
  - match: Host(`storage.k3s.spamasaurus.com`)
  - match: Host(`storage.spamasaurus.com`)
    kind: Rule
    services:
    - name: longhorn-frontend
@@ -18,6 +18,6 @@ spec:
      name: default
    certResolver: default
    domains:
    - main: '*.k3s.spamasaurus.com'
    - main: '*.spamasaurus.com'
      sans:
      - 'k3s.spamasaurus.com'
      - 'spamasaurus.com'

system/InotifyMaxWatchers/daemonSet-InotifyMaxWatchers.yml (new)

@@ -0,0 +1,25 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: inotify-max-watchers
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: inotify-max-watchers
  template:
    metadata:
      name: inotify-max-watchers
      labels:
        app: inotify-max-watchers
    spec:
      containers:
      - name: inotify-max-watchers
        image: alpine
        imagePullPolicy: Always
        securityContext:
          privileged: true
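        # fs.inotify limits are kernel-global rather than per-container, so the
        # privileged `sysctl -p` below applies node-wide; `tail -f /dev/null`
        # keeps the pod in a Running state afterwards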
        command:
        - "/bin/sh"
        - "-c"
        - "echo 'fs.inotify.max_user_watches=524288' | tee /etc/sysctl.conf; sysctl -p && tail -f /dev/null"