Drop initial code

Danny Bessems
2026-01-15 09:58:01 +00:00
parent 227d957219
commit 1e7c9ba5cb
228 changed files with 19883 additions and 1 deletion

deploy/rancher/README.md Normal file

@@ -0,0 +1,31 @@
# Rancher deployment overview
## Prerequisites
* The LoadBalancer IP address must come from the same subnet as the Harvester node IP (i.e. 172.27.27.0/24 in the current implementation) for the LB to work.
* Due to environment firewall restrictions the _https://get.rke.io_ install source does not work. All such references must be replaced with the alternative _install.sh_ download location _https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh_, as shown below.
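
A minimal sketch of the replacement, matching the cloud-init `runcmd` steps already used in the Fleet bundles in this repository; the RKE2 version shown is simply the one pinned there:

```bash
# Fetch the RKE2 install script from GitHub instead of the blocked install source,
# then run it pinned to a specific RKE2 version (same steps as the bundled cloud-init).
mkdir -p /var/lib/rancher/rke2-artifacts
wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh
chmod +x /var/lib/rancher/install.sh
INSTALL_RKE2_VERSION=v1.33.4+rke2r1 /var/lib/rancher/install.sh
```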
## Helm chart deployment
An example Helm chart is provided in the following [folder](./helm/rke2).
Two value files are prepared for DHCP and static IP allocation (an example install command follows the list):
* [rancher_values_dhcp.yaml](./helm/rancher_values_dhcp.yaml)
* [rancher_values_static.yaml](./helm/rancher_values_static.yaml)
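
A minimal install sketch, assuming the chart is deployed with plain Helm against the Harvester cluster; the release name and namespace below are illustrative, not mandated by the chart:

```bash
# Hypothetical release name; pick rancher_values_dhcp.yaml or rancher_values_static.yaml.
helm install rke2-mgmt ./helm/rke2 \
  --namespace default \
  --values ./helm/rancher_values_dhcp.yaml
```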
## Fleet bundle deployment
Two Fleet bundles were prepared based on the Helm chart (see the example after the list):
* [mgmt-dhcp.yaml](./deploy/fleet/mgmt-dhcp.yaml)
* [mgmt-static.yaml](./deploy/fleet/mgmt-static.yaml)
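
Both bundles declare `metadata.namespace: fleet-local`, so they target the local Fleet that ships with Harvester. A minimal sketch for the DHCP variant (apply one bundle, not both):

```bash
# Apply the management-cluster bundle; the namespace is set inside the manifest.
kubectl apply -f ./deploy/fleet/mgmt-dhcp.yaml

# Watch Fleet reconcile it (assumes the Fleet CRDs present on Harvester).
kubectl get bundles -n fleet-local
```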
## CAPI based deployment
**Notes:**
* Not yet updated for the R&D environment or tested!
* There are some compatibility issues between vcluster v0.30 and Harvester.
Harvester add-on-based [deployment](./capi/addon.yaml).
Helm CRD-based [deployment](./capi/helmchart.yaml).
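
Either manifest is applied directly to the Harvester cluster; a minimal sketch, assuming the current kubeconfig points at Harvester:

```bash
# Harvester add-on variant (Addon resource, marked experimental in the manifest):
kubectl apply -f ./capi/addon.yaml

# or the Helm CRD variant (helm.cattle.io/v1 HelmChart):
kubectl apply -f ./capi/helmchart.yaml
```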


@@ -0,0 +1,265 @@
apiVersion: harvesterhci.io/v1beta1
kind: Addon
metadata:
labels:
addon.harvesterhci.io/experimental: "true"
name: rancher-embedded
namespace: rancher-embedded
spec:
chart: vcluster
version: 0.19.0
enabled: false
repo: https://charts.loft.sh
valuesContent: |-
vm_network_name: "k8s-network"
ssh_keypair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil"
vm_image_name: "ubuntu-22.04"
vm_default_user: "ubuntu"
harvester_vip: "172.27.27.40"
rancher_url: "rancher-mgmt.product.lan"
harvester_kubeconfig_b64: "YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIG5hbWU6ICJsb2NhbCIKICBjbHVzdGVyOgogICAgc2VydmVyOiAiaHR0cHM6Ly8xNzIuMjcuMjcuMTkwL2s4cy9jbHVzdGVycy9sb2NhbCIKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiAiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSjJWRU5EUVwKICAgICAgVmRQWjBGM1NVSkJaMGxDUVVSQlMwSm5aM0ZvYTJwUFVGRlJSRUZxUWtkTlVuZDNSMmRaUkZaUlVVdEZlRTVyWlZjMWFHSlhiR29LWVwKICAgICAga2RzZW1SSFZuVmFXRWwwWWpOS2JrMVRXWGRLUVZsRVZsRlJSRVJDTVd0bFZ6Vm9ZbGRzYW1KSGJIcGtSMVoxV2xoSmRGa3lSa0ZOVlwKICAgICAgR014VDFSVmR3cFBSR2MxVFZSQlpVWjNNSGxPVkVWM1RVUk5lRTVxU1RSTlZFWmhSbmN3ZWs1VVJYZE5SRVY0VG1wSk5FMVVSbUZOUlwKICAgICAgVmw0U0VSQllVSm5UbFpDUVc5VUNrVXlValZpYlVaMFlWZE9jMkZZVGpCYVZ6VnNZMmt4ZG1OdFkzaEtha0ZyUW1kT1ZrSkJUVTFJVlwKICAgICAgMUkxWW0xR2RHRlhUbk5oV0U0d1dsYzFiR05wTVdvS1dWVkJlRTU2VlRWT1ZFRTBUMFJyZUUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVwKICAgICAgVmxKUzI5YVNYcHFNRVJCVVdORVVXZEJSVUZWVlU0eFdtUmxURlY2UmdwTWFtSk1Wbk5TT1ZNMGJTdFRTWE5XWlVOa1JVcHVNVGhRYVwKICAgICAgWHBUYm1jMk5rNXhMMWhHVkZaT2RGRnFMMEl3T1hCR01GTXdUVFpMZDJSbmFHUldWM1Y1Q25vMWJFTmlSVzlVVkRaT1EwMUZRWGRFWlwKICAgICAgMWxFVmxJd1VFRlJTQzlDUVZGRVFXZExhMDFCT0VkQk1WVmtSWGRGUWk5M1VVWk5RVTFDUVdZNGQwaFJXVVFLVmxJd1QwSkNXVVZHU1wKICAgICAgRXd2Um5Ga05GRXJaamhpTlhkTFJtSjJUSEpwVTJrMWRtVnpUVUZ2UjBORGNVZFRUVFE1UWtGTlEwRXdaMEZOUlZWRFNVUk5XZ3BVUlwKICAgICAgWFl6VmpjM04zRjZja2RCTDBjNVdVUmxjMlUwVkdaNllWRlhiVmh3UWxWTE9FRm5XWFZJUVdsRlFXeHZaVEpNTVM5RU9VZE1VRGRXU1wKICAgICAgMU13TWxObUNsUnRRbHBxT1d4WVNVeFBSWEJJZDBkR05tSk1WR3hqUFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0IgoKdXNlcnM6Ci0gbmFtZTogImxvY2FsIgogIHVzZXI6CiAgICB0b2tlbjogImt1YmVjb25maWctdXNlci1remo5OWJubmdmOmd4Nm1kdDVmMjlzZjRsY3R2Zm44Mnp4c3NsOXhydzJtNjg1NDhnOWpsN3psbHR2Nm00dHB6ZiIKCgpjb250ZXh0czoKLSBuYW1lOiAibG9jYWwiCiAgY29udGV4dDoKICAgIHVzZXI6ICJsb2NhbCIKICAgIGNsdXN0ZXI6ICJsb2NhbCIKCmN1cnJlbnQtY29udGV4dDogImxvY2FsIgo="
vcluster:
image: rancher/k3s:v1.30.6-k3s1
sync:
ingresses:
enabled: "true"
init:
manifestsTemplate: |-
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
spec:
chart: cert-manager
createNamespace: true
version: v1.13.0
repo: https://charts.jetstack.io
targetNamespace: cert-manager
valuesContent: |
installCRDs: true
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: bootstrap-cluster
spec:
chart: cluster-api-operator
repo: https://kubernetes-sigs.github.io/cluster-api-operator
version: v0.14.0
valuesContent: |
cert-manager:
enabled: true
bootstrap: rke2
controlPlane: rke2
---
apiVersion: v1
kind: Namespace
metadata:
name: caphv-system
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: InfrastructureProvider
metadata:
name: harvester
namespace: caphv-system
spec:
version: v0.1.4
fetchConfig:
url: https://github.com/rancher-sandbox/cluster-api-provider-harvester/releases/download/v0.1.4/components.yaml
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
ccm: external
cluster.x-k8s.io/cluster-name: rke2-mgmt
cni: external
csi: external
name: rke2-mgmt
namespace: default
spec:
controlPlaneRef:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: RKE2ControlPlane
name: rke2-mgmt-control-plane
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterCluster
name: rke2-mgmt-hv
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterCluster
metadata:
name: rke2-mgmt-hv
namespace: default
spec:
identitySecret:
name: hv-identity-secret
namespace: default
loadBalancerConfig:
ipamType: dhcp
listeners:
- backendPort: 9345
name: rke2-server
port: 9345
protocol: TCP
- backendPort: 443
name: rke2-ingress
port: 443
protocol: TCP
server: {{ .Values.harvester_vip }}
targetNamespace: default
---
apiVersion: v1
data:
kubeconfig: {{ .Values.harvester_kubeconfig_b64 }}
kind: Secret
metadata:
name: hv-identity-secret
namespace: default
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: RKE2ControlPlane
metadata:
name: rke2-mgmt-control-plane
namespace: default
spec:
agentConfig:
version: v1.29.6+rke2r1
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
name: rke2-mgmt-cp-machine
namespace: default
replicas: 3
serverConfig:
cni: canal
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1
kind: RKE2ConfigTemplate
metadata:
name: rke2-mgmt-worker
namespace: default
spec:
template:
spec:
agentConfig:
version: v1.29.6+rke2r1
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: rke2-mgmt-workers
namespace: default
spec:
clusterName: rke2-mgmt
replicas: 0
selector:
matchLabels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
template:
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1
kind: RKE2ConfigTemplate
name: rke2-mgmt-worker
namespace: default
clusterName: rke2-mgmt
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
name: rke2-mgmt-wk-machine
namespace: default
version: v1.29.6+rke2r1
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
metadata:
name: rke2-mgmt-wk-machine
namespace: default
spec:
template:
spec:
cpu: 2
memory: 16Gi
networks:
- {{ .Values.vm_network_name }}
sshKeyPair: default/{{ .Values.ssh_keypair }}
sshUser: {{ .Values.vm_default_user }}
volumes:
- bootOrder: 0
imageName: default/{{ .Values.vm_image_name }}
volumeSize: 40Gi
volumeType: image
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
metadata:
name: rke2-mgmt-cp-machine
namespace: default
spec:
template:
spec:
cpu: 2
memory: 16Gi
networks:
- {{ .Values.vm_network_name }}
sshKeyPair: default/{{ .Values.ssh_keypair }}
sshUser: {{ .Values.vm_default_user }}
volumes:
- bootOrder: 0
imageName: default/{{ .Values.vm_image_name }}
volumeSize: 40Gi
volumeType: image
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
labels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
name: rke2-mgmt-rancher-crs-0
namespace: default
spec:
clusterSelector:
matchLabels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
resources:
- kind: Secret
name: rancher-namespace
- kind: Secret
name: rancher-helmchart
- kind: Secret
name: certmanager-helmchart
strategy: Reconcile
---
apiVersion: v1
kind: Secret
metadata:
name: certmanager-helmchart
namespace: default
stringData:
data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: cert-manager\n
\ namespace: default \nspec:\n bootstrap: true\n targetNamespace: cert-manager\n
\ createNamespace: true\n valuesContent: |-\n securityContext:\n runAsNonRoot:
true\n crds:\n enabled: true\n version: v1.16.1\n repo: https://charts.jetstack.io\n
\ chart: cert-manager\n"
type: addons.cluster.x-k8s.io/resource-set
---
apiVersion: v1
kind: Secret
metadata:
name: rancher-helmchart
namespace: default
stringData:
data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: rancher\n
\ namespace: default \nspec:\n bootstrap: false\n targetNamespace: cattle-system\n
\ createNamespace: true\n set:\n hostname: {{ .Values.rancher_url }}\n
\ replicas: 3\n bootstrapPassword: admin\n valuesContent: |-\n global:\n
\ cattle:\n psp:\n enabled: false\n ingress:\n tls:\n
\ source: rancher\n repo: https://releases.rancher.com/server-charts/stable\n
\ chart: rancher\n version: v2.9.1\n"
type: addons.cluster.x-k8s.io/resource-set


@@ -0,0 +1,90 @@
apiVersion: harvesterhci.io/v1beta1
kind: Addon
metadata:
labels:
addon.harvesterhci.io/experimental: 'true'
name: temp-vlcuster-fix
namespace: temp-vlcuster-fix
spec:
chart: vcluster
enabled: true
repo: https://charts.loft.sh
valuesContent: |-
serviceCIDR: 10.53.0.0/16
controlPlane:
distro:
k3s:
resources:
limits:
memory: 16096Mi
cpu: 8000m
enabled: true
imagePullPolicy: IfNotPresent
image:
tag: v1.33.4-k3s1
repository: rancher/k3s
sync:
toHost:
ingresses:
enabled: true
experimental:
deploy:
vcluster:
manifests: |-
apiVersion: v1
kind: Namespace
metadata:
name: cattle-system
---
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
certmanager.k8s.io/disable-validation: "true"
helm:
- chart:
name: cert-manager
repo: https://charts.jetstack.io
version: v1.8.0
release:
name: cert-manager
namespace: cert-manager
values: |-
installCRDs: true
- chart:
name: rancher
repo: https://releases.rancher.com/server-charts/latest
version: v2.12.0
release:
name: rancher
namespace: cattle-system
values: |-
hostname: rancher.product.lan
replicas: 1
bootstrapPassword: ce6XxaBTv9pHpGln
rancherImage: rancher/rancher
ingress:
tls:
source: rancher
global:
cattle:
psp:
enabled: "false"
extraEnv:
- name: CATTLE_AGENT_IMAGE
value: rancher/rancher-agent:v2.12.0
version: v0.28.0
status:
conditions:
- lastUpdateTime: '2025-10-24T13:24:37Z'
status: 'True'
type: Completed
- lastUpdateTime: '2025-10-24T13:24:37Z'
status: 'False'
type: InProgress
- lastUpdateTime: '2025-10-24T13:23:08Z'
status: 'False'
type: OperationFailed
status: AddonDeploySuccessful


@@ -0,0 +1,294 @@
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: rancher-embedded
spec:
chart: vcluster
version: 0.30.1
repo: https://charts.loft.sh
valuesContent: |
# vm_network_name: ${VM_NETWORK}
# ssh_keypair: ${VM_SSH_KEYPAIR}
# vm_image_name: ${VM_IMAGE_NAME}
# vm_default_user: ${VM_DEFAULT_USER}
# harvester_vip: ${HARVESTER_VIP}
# rancher_url: ${RANCHER_URL}
# harvester_kubeconfig_b64: ${HARVESTER_KUBECONFIG_B64}
#external:
controlPlane:
distro:
k3s:
enabled: true
image:
tag: v1.33.5-k3s1
statefulSet:
scheduling:
podManagementPolicy: OrderedReady
sync:
fromHost:
ingressClasses:
enabled: true
toHost:
ingresses:
enabled: true
experimental:
deploy:
vcluster:
#vm_network_name: "k8s-network"
#ssh_keypair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil"
#vm_image_name: "ubuntu-22.04"
#vm_default_user: "ubuntu"
#harvester_vip: "172.27.27.40"
#rancher_url: "rancher-mgmt.product.lan"
#harvester_kubeconfig_b64: "YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIG5hbWU6ICJsb2NhbCIKICBjbHVzdGVyOgogICAgc2VydmVyOiAiaHR0cHM6Ly8xNzIuMjcuMjcuMTkwL2s4cy9jbHVzdGVycy9sb2NhbCIKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiAiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSjJWRU5EUVwKICAgICAgVmRQWjBGM1NVSkJaMGxDUVVSQlMwSm5aM0ZvYTJwUFVGRlJSRUZxUWtkTlVuZDNSMmRaUkZaUlVVdEZlRTVyWlZjMWFHSlhiR29LWVwKICAgICAga2RzZW1SSFZuVmFXRWwwWWpOS2JrMVRXWGRLUVZsRVZsRlJSRVJDTVd0bFZ6Vm9ZbGRzYW1KSGJIcGtSMVoxV2xoSmRGa3lSa0ZOVlwKICAgICAgR014VDFSVmR3cFBSR2MxVFZSQlpVWjNNSGxPVkVWM1RVUk5lRTVxU1RSTlZFWmhSbmN3ZWs1VVJYZE5SRVY0VG1wSk5FMVVSbUZOUlwKICAgICAgVmw0U0VSQllVSm5UbFpDUVc5VUNrVXlValZpYlVaMFlWZE9jMkZZVGpCYVZ6VnNZMmt4ZG1OdFkzaEtha0ZyUW1kT1ZrSkJUVTFJVlwKICAgICAgMUkxWW0xR2RHRlhUbk5oV0U0d1dsYzFiR05wTVdvS1dWVkJlRTU2VlRWT1ZFRTBUMFJyZUUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVwKICAgICAgVmxKUzI5YVNYcHFNRVJCVVdORVVXZEJSVUZWVlU0eFdtUmxURlY2UmdwTWFtSk1Wbk5TT1ZNMGJTdFRTWE5XWlVOa1JVcHVNVGhRYVwKICAgICAgWHBUYm1jMk5rNXhMMWhHVkZaT2RGRnFMMEl3T1hCR01GTXdUVFpMZDJSbmFHUldWM1Y1Q25vMWJFTmlSVzlVVkRaT1EwMUZRWGRFWlwKICAgICAgMWxFVmxJd1VFRlJTQzlDUVZGRVFXZExhMDFCT0VkQk1WVmtSWGRGUWk5M1VVWk5RVTFDUVdZNGQwaFJXVVFLVmxJd1QwSkNXVVZHU1wKICAgICAgRXd2Um5Ga05GRXJaamhpTlhkTFJtSjJUSEpwVTJrMWRtVnpUVUZ2UjBORGNVZFRUVFE1UWtGTlEwRXdaMEZOUlZWRFNVUk5XZ3BVUlwKICAgICAgWFl6VmpjM04zRjZja2RCTDBjNVdVUmxjMlUwVkdaNllWRlhiVmh3UWxWTE9FRm5XWFZJUVdsRlFXeHZaVEpNTVM5RU9VZE1VRGRXU1wKICAgICAgMU13TWxObUNsUnRRbHBxT1d4WVNVeFBSWEJJZDBkR05tSk1WR3hqUFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0IgoKdXNlcnM6Ci0gbmFtZTogImxvY2FsIgogIHVzZXI6CiAgICB0b2tlbjogImt1YmVjb25maWctdXNlci1remo5OWJubmdmOmd4Nm1kdDVmMjlzZjRsY3R2Zm44Mnp4c3NsOXhydzJtNjg1NDhnOWpsN3psbHR2Nm00dHB6ZiIKCgpjb250ZXh0czoKLSBuYW1lOiAibG9jYWwiCiAgY29udGV4dDoKICAgIHVzZXI6ICJsb2NhbCIKICAgIGNsdXN0ZXI6ICJsb2NhbCIKCmN1cnJlbnQtY29udGV4dDogImxvY2FsIgo="
manifestsTemplate: |-
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
spec:
chart: cert-manager
createNamespace: true
version: v1.13.0
repo: https://charts.jetstack.io
targetNamespace: cert-manager
valuesContent: |
installCRDs: true
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: bootstrap-cluster
spec:
chart: cluster-api-operator
repo: https://kubernetes-sigs.github.io/cluster-api-operator
version: v0.14.0
valuesContent: |
cert-manager:
enabled: true
bootstrap: rke2
controlPlane: rke2
---
apiVersion: v1
kind: Namespace
metadata:
name: caphv-system
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: InfrastructureProvider
metadata:
name: harvester
namespace: caphv-system
spec:
version: v0.1.4
fetchConfig:
url: https://github.com/rancher-sandbox/cluster-api-provider-harvester/releases/download/v0.1.4/components.yaml
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
ccm: external
cluster.x-k8s.io/cluster-name: rke2-mgmt
cni: external
csi: external
name: rke2-mgmt
namespace: default
spec:
controlPlaneRef:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: RKE2ControlPlane
name: rke2-mgmt-control-plane
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterCluster
name: rke2-mgmt-hv
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterCluster
metadata:
name: rke2-mgmt-hv
namespace: default
spec:
identitySecret:
name: hv-identity-secret
namespace: default
loadBalancerConfig:
ipamType: dhcp
listeners:
- backendPort: 9345
name: rke2-server
port: 9345
protocol: TCP
- backendPort: 443
name: rke2-ingress
port: 443
protocol: TCP
#server: {{ .Values.experimental.deploy.vcluster.harvester_vip }}
server: 172.27.27.40
targetNamespace: default
---
apiVersion: v1
data:
#kubeconfig: {{ .Values.experimental.deploy.vcluster.harvester_kubeconfig_b64 }}
kubeconfig: "YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIG5hbWU6ICJsb2NhbCIKICBjbHVzdGVyOgogICAgc2VydmVyOiAiaHR0cHM6Ly8xNzIuMjcuMjcuMTkwL2s4cy9jbHVzdGVycy9sb2NhbCIKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiAiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSjJWRU5EUVwKICAgICAgVmRQWjBGM1NVSkJaMGxDUVVSQlMwSm5aM0ZvYTJwUFVGRlJSRUZxUWtkTlVuZDNSMmRaUkZaUlVVdEZlRTVyWlZjMWFHSlhiR29LWVwKICAgICAga2RzZW1SSFZuVmFXRWwwWWpOS2JrMVRXWGRLUVZsRVZsRlJSRVJDTVd0bFZ6Vm9ZbGRzYW1KSGJIcGtSMVoxV2xoSmRGa3lSa0ZOVlwKICAgICAgR014VDFSVmR3cFBSR2MxVFZSQlpVWjNNSGxPVkVWM1RVUk5lRTVxU1RSTlZFWmhSbmN3ZWs1VVJYZE5SRVY0VG1wSk5FMVVSbUZOUlwKICAgICAgVmw0U0VSQllVSm5UbFpDUVc5VUNrVXlValZpYlVaMFlWZE9jMkZZVGpCYVZ6VnNZMmt4ZG1OdFkzaEtha0ZyUW1kT1ZrSkJUVTFJVlwKICAgICAgMUkxWW0xR2RHRlhUbk5oV0U0d1dsYzFiR05wTVdvS1dWVkJlRTU2VlRWT1ZFRTBUMFJyZUUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVwKICAgICAgVmxKUzI5YVNYcHFNRVJCVVdORVVXZEJSVUZWVlU0eFdtUmxURlY2UmdwTWFtSk1Wbk5TT1ZNMGJTdFRTWE5XWlVOa1JVcHVNVGhRYVwKICAgICAgWHBUYm1jMk5rNXhMMWhHVkZaT2RGRnFMMEl3T1hCR01GTXdUVFpMZDJSbmFHUldWM1Y1Q25vMWJFTmlSVzlVVkRaT1EwMUZRWGRFWlwKICAgICAgMWxFVmxJd1VFRlJTQzlDUVZGRVFXZExhMDFCT0VkQk1WVmtSWGRGUWk5M1VVWk5RVTFDUVdZNGQwaFJXVVFLVmxJd1QwSkNXVVZHU1wKICAgICAgRXd2Um5Ga05GRXJaamhpTlhkTFJtSjJUSEpwVTJrMWRtVnpUVUZ2UjBORGNVZFRUVFE1UWtGTlEwRXdaMEZOUlZWRFNVUk5XZ3BVUlwKICAgICAgWFl6VmpjM04zRjZja2RCTDBjNVdVUmxjMlUwVkdaNllWRlhiVmh3UWxWTE9FRm5XWFZJUVdsRlFXeHZaVEpNTVM5RU9VZE1VRGRXU1wKICAgICAgMU13TWxObUNsUnRRbHBxT1d4WVNVeFBSWEJJZDBkR05tSk1WR3hqUFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0IgoKdXNlcnM6Ci0gbmFtZTogImxvY2FsIgogIHVzZXI6CiAgICB0b2tlbjogImt1YmVjb25maWctdXNlci1remo5OWJubmdmOmd4Nm1kdDVmMjlzZjRsY3R2Zm44Mnp4c3NsOXhydzJtNjg1NDhnOWpsN3psbHR2Nm00dHB6ZiIKCgpjb250ZXh0czoKLSBuYW1lOiAibG9jYWwiCiAgY29udGV4dDoKICAgIHVzZXI6ICJsb2NhbCIKICAgIGNsdXN0ZXI6ICJsb2NhbCIKCmN1cnJlbnQtY29udGV4dDogImxvY2FsIgo="
kind: Secret
metadata:
name: hv-identity-secret
namespace: default
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: RKE2ControlPlane
metadata:
name: rke2-mgmt-control-plane
namespace: default
spec:
agentConfig:
version: v1.33.5+rke2r1
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
name: rke2-mgmt-cp-machine
namespace: default
replicas: 3
serverConfig:
cni: canal
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1
kind: RKE2ConfigTemplate
metadata:
name: rke2-mgmt-worker
namespace: default
spec:
template:
spec:
agentConfig:
version: v1.33.5+rke2r1
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: rke2-mgmt-workers
namespace: default
spec:
clusterName: rke2-mgmt
replicas: 0
selector:
matchLabels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
template:
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1
kind: RKE2ConfigTemplate
name: rke2-mgmt-worker
namespace: default
clusterName: rke2-mgmt
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
name: rke2-mgmt-wk-machine
namespace: default
version: v1.29.6+rke2r1
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
metadata:
name: rke2-mgmt-wk-machine
namespace: default
spec:
template:
spec:
cpu: 2
memory: 16Gi
networks:
#- {{ .Values.experimental.deploy.vcluster.vm_network_name }}
- k8s-network
#sshKeyPair: default/{{ .Values.experimental.deploy.vcluster.ssh_keypair }}
sshKeyPair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil"
#sshUser: {{ .Values.experimental.deploy.vcluster.vm_default_user }}
sshUser: ubuntu
volumes:
- bootOrder: 0
imageName: default/{{ .Values.experimental.deploy.vcluster.vm_image_name }}
volumeSize: 40Gi
volumeType: image
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
metadata:
name: rke2-mgmt-cp-machine
namespace: default
spec:
template:
spec:
cpu: 2
memory: 16Gi
networks:
#- {{ .Values.experimental.deploy.vcluster.vm_network_name }}
- k8s-network
#sshKeyPair: default/{{ .Values.experimental.deploy.vcluster.ssh_keypair }}
sshKeyPair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil"
#sshUser: {{ .Values.experimental.deploy.vcluster.vm_default_user }}
sshUser: ubuntu
volumes:
- bootOrder: 0
imageName: default/{{ .Values.experimental.deploy.vcluster.vm_image_name }}
volumeSize: 40Gi
volumeType: image
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
labels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
name: rke2-mgmt-rancher-crs-0
namespace: default
spec:
clusterSelector:
matchLabels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
resources:
- kind: Secret
name: rancher-namespace
- kind: Secret
name: rancher-helmchart
- kind: Secret
name: certmanager-helmchart
strategy: Reconcile
---
apiVersion: v1
kind: Secret
metadata:
name: certmanager-helmchart
namespace: default
stringData:
data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: cert-manager\n
\ namespace: default \nspec:\n bootstrap: true\n targetNamespace: cert-manager\n
\ createNamespace: true\n valuesContent: |-\n securityContext:\n runAsNonRoot:
true\n crds:\n enabled: true\n version: v1.16.1\n repo: https://charts.jetstack.io\n
\ chart: cert-manager\n"
type: addons.cluster.x-k8s.io/resource-set
---
apiVersion: v1
kind: Secret
metadata:
name: rancher-helmchart
namespace: default
stringData:
data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: rancher\n
\ namespace: default \nspec:\n bootstrap: false\n targetNamespace: cattle-system\n
\ createNamespace: true\n set:\n #hostname: {{ .Values.experimental.deploy.vcluster.rancher_url }}\n
\ hostname: rancher-mgmt.product.lan\n
\ replicas: 3\n bootstrapPassword: admin\n valuesContent: |-\n global:\n
\ cattle:\n psp:\n enabled: false\n ingress:\n tls:\n
\ source: rancher\n repo: https://releases.rancher.com/server-charts/latest\n
\ chart: rancher\n version: v2.12.3\n"
type: addons.cluster.x-k8s.io/resource-set


@@ -0,0 +1,255 @@
#vm_network_name: "k8s-network"
#ssh_keypair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil"
#vm_image_name: "ubuntu-22.04"
#vm_default_user: "ubuntu"
#harvester_vip: "172.27.27.40"
#rancher_url: "rancher-mgmt.product.lan"
#harvester_kubeconfig_b64: "YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIG5hbWU6ICJsb2NhbCIKICBjbHVzdGVyOgogICAgc2VydmVyOiAiaHR0cHM6Ly8xNzIuMjcuMjcuMTkwL2s4cy9jbHVzdGVycy9sb2NhbCIKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiAiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSjJWRU5EUVwKICAgICAgVmRQWjBGM1NVSkJaMGxDUVVSQlMwSm5aM0ZvYTJwUFVGRlJSRUZxUWtkTlVuZDNSMmRaUkZaUlVVdEZlRTVyWlZjMWFHSlhiR29LWVwKICAgICAga2RzZW1SSFZuVmFXRWwwWWpOS2JrMVRXWGRLUVZsRVZsRlJSRVJDTVd0bFZ6Vm9ZbGRzYW1KSGJIcGtSMVoxV2xoSmRGa3lSa0ZOVlwKICAgICAgR014VDFSVmR3cFBSR2MxVFZSQlpVWjNNSGxPVkVWM1RVUk5lRTVxU1RSTlZFWmhSbmN3ZWs1VVJYZE5SRVY0VG1wSk5FMVVSbUZOUlwKICAgICAgVmw0U0VSQllVSm5UbFpDUVc5VUNrVXlValZpYlVaMFlWZE9jMkZZVGpCYVZ6VnNZMmt4ZG1OdFkzaEtha0ZyUW1kT1ZrSkJUVTFJVlwKICAgICAgMUkxWW0xR2RHRlhUbk5oV0U0d1dsYzFiR05wTVdvS1dWVkJlRTU2VlRWT1ZFRTBUMFJyZUUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVwKICAgICAgVmxKUzI5YVNYcHFNRVJCVVdORVVXZEJSVUZWVlU0eFdtUmxURlY2UmdwTWFtSk1Wbk5TT1ZNMGJTdFRTWE5XWlVOa1JVcHVNVGhRYVwKICAgICAgWHBUYm1jMk5rNXhMMWhHVkZaT2RGRnFMMEl3T1hCR01GTXdUVFpMZDJSbmFHUldWM1Y1Q25vMWJFTmlSVzlVVkRaT1EwMUZRWGRFWlwKICAgICAgMWxFVmxJd1VFRlJTQzlDUVZGRVFXZExhMDFCT0VkQk1WVmtSWGRGUWk5M1VVWk5RVTFDUVdZNGQwaFJXVVFLVmxJd1QwSkNXVVZHU1wKICAgICAgRXd2Um5Ga05GRXJaamhpTlhkTFJtSjJUSEpwVTJrMWRtVnpUVUZ2UjBORGNVZFRUVFE1UWtGTlEwRXdaMEZOUlZWRFNVUk5XZ3BVUlwKICAgICAgWFl6VmpjM04zRjZja2RCTDBjNVdVUmxjMlUwVkdaNllWRlhiVmh3UWxWTE9FRm5XWFZJUVdsRlFXeHZaVEpNTVM5RU9VZE1VRGRXU1wKICAgICAgMU13TWxObUNsUnRRbHBxT1d4WVNVeFBSWEJJZDBkR05tSk1WR3hqUFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0IgoKdXNlcnM6Ci0gbmFtZTogImxvY2FsIgogIHVzZXI6CiAgICB0b2tlbjogImt1YmVjb25maWctdXNlci1remo5OWJubmdmOmd4Nm1kdDVmMjlzZjRsY3R2Zm44Mnp4c3NsOXhydzJtNjg1NDhnOWpsN3psbHR2Nm00dHB6ZiIKCgpjb250ZXh0czoKLSBuYW1lOiAibG9jYWwiCiAgY29udGV4dDoKICAgIHVzZXI6ICJsb2NhbCIKICAgIGNsdXN0ZXI6ICJsb2NhbCIKCmN1cnJlbnQtY29udGV4dDogImxvY2FsIgo="
vcluster:
image: rancher/k3s:v1.33.5-k3s1
sync:
ingresses:
enabled: true
init:
manifestsTemplate: |-
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
spec:
chart: cert-manager
createNamespace: true
version: v1.13.0
repo: https://charts.jetstack.io
targetNamespace: cert-manager
valuesContent: |
installCRDs: true
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: bootstrap-cluster
spec:
chart: cluster-api-operator
repo: https://kubernetes-sigs.github.io/cluster-api-operator
version: v0.14.0
valuesContent: |
cert-manager:
enabled: true
bootstrap: rke2
controlPlane: rke2
---
apiVersion: v1
kind: Namespace
metadata:
name: caphv-system
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: InfrastructureProvider
metadata:
name: harvester
namespace: caphv-system
spec:
version: v0.1.4
fetchConfig:
url: https://github.com/rancher-sandbox/cluster-api-provider-harvester/releases/download/v0.1.4/components.yaml
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
ccm: external
cluster.x-k8s.io/cluster-name: rke2-mgmt
cni: external
csi: external
name: rke2-mgmt
namespace: default
spec:
controlPlaneRef:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: RKE2ControlPlane
name: rke2-mgmt-control-plane
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterCluster
name: rke2-mgmt-hv
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterCluster
metadata:
name: rke2-mgmt-hv
namespace: default
spec:
identitySecret:
name: hv-identity-secret
namespace: default
loadBalancerConfig:
ipamType: dhcp
listeners:
- backendPort: 9345
name: rke2-server
port: 9345
protocol: TCP
- backendPort: 443
name: rke2-ingress
port: 443
protocol: TCP
server: {{ .Values.harvester_vip }}
targetNamespace: default
---
apiVersion: v1
data:
kubeconfig: {{ .Values.harvester_kubeconfig_b64 }}
kind: Secret
metadata:
name: hv-identity-secret
namespace: default
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: RKE2ControlPlane
metadata:
name: rke2-mgmt-control-plane
namespace: default
spec:
agentConfig:
version: v1.33.5+rke2r1
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
name: rke2-mgmt-cp-machine
namespace: default
replicas: 3
serverConfig:
cni: canal
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1
kind: RKE2ConfigTemplate
metadata:
name: rke2-mgmt-worker
namespace: default
spec:
template:
spec:
agentConfig:
version: v1.33.5+rke2r1
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: rke2-mgmt-workers
namespace: default
spec:
clusterName: rke2-mgmt
replicas: 0
selector:
matchLabels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
template:
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1
kind: RKE2ConfigTemplate
name: rke2-mgmt-worker
namespace: default
clusterName: rke2-mgmt
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
name: rke2-mgmt-wk-machine
namespace: default
version: v1.29.6+rke2r1
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
metadata:
name: rke2-mgmt-wk-machine
namespace: default
spec:
template:
spec:
cpu: 2
memory: 16Gi
networks:
- {{ .Values.vm_network_name }}
sshKeyPair: default/{{ .Values.ssh_keypair }}
sshUser: {{ .Values.vm_default_user }}
volumes:
- bootOrder: 0
imageName: default/{{ .Values.vm_image_name }}
volumeSize: 40Gi
volumeType: image
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
metadata:
name: rke2-mgmt-cp-machine
namespace: default
spec:
template:
spec:
cpu: 2
memory: 16Gi
networks:
- {{ .Values.vm_network_name }}
sshKeyPair: default/{{ .Values.ssh_keypair }}
sshUser: {{ .Values.vm_default_user }}
volumes:
- bootOrder: 0
imageName: default/{{ .Values.vm_image_name }}
volumeSize: 40Gi
volumeType: image
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
labels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
name: rke2-mgmt-rancher-crs-0
namespace: default
spec:
clusterSelector:
matchLabels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
resources:
- kind: Secret
name: rancher-namespace
- kind: Secret
name: rancher-helmchart
- kind: Secret
name: certmanager-helmchart
strategy: Reconcile
---
apiVersion: v1
kind: Secret
metadata:
name: certmanager-helmchart
namespace: default
stringData:
data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: cert-manager\n
\ namespace: default \nspec:\n bootstrap: true\n targetNamespace: cert-manager\n
\ createNamespace: true\n valuesContent: |-\n securityContext:\n runAsNonRoot:
true\n crds:\n enabled: true\n version: v1.16.1\n repo: https://charts.jetstack.io\n
\ chart: cert-manager\n"
type: addons.cluster.x-k8s.io/resource-set
---
apiVersion: v1
kind: Secret
metadata:
name: rancher-helmchart
namespace: default
stringData:
data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: rancher\n
\ namespace: default \nspec:\n bootstrap: false\n targetNamespace: cattle-system\n
\ createNamespace: true\n set:\n hostname: {{ .Values.rancher_url }}\n
\ replicas: 3\n bootstrapPassword: admin\n valuesContent: |-\n global:\n
\ cattle:\n psp:\n enabled: false\n ingress:\n tls:\n
\ source: rancher\n repo: https://releases.rancher.com/server-charts/latest\n
\ chart: rancher\n version: v2.12.3\n"
type: addons.cluster.x-k8s.io/resource-set


@@ -0,0 +1,256 @@
controlPlane:
distro:
k3s:
enabled: true
image:
tag: v1.33.5-k3s1
statefulSet:
scheduling:
podManagementPolicy: OrderedReady
experimental:
deploy:
vcluster:
manifestsTemplate: |-
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
spec:
chart: cert-manager
createNamespace: true
version: v1.13.0
repo: https://charts.jetstack.io
targetNamespace: cert-manager
valuesContent: |
installCRDs: true
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: bootstrap-cluster
spec:
chart: cluster-api-operator
repo: https://kubernetes-sigs.github.io/cluster-api-operator
version: v0.14.0
valuesContent: |
cert-manager:
enabled: true
bootstrap: rke2
controlPlane: rke2
---
apiVersion: v1
kind: Namespace
metadata:
name: caphv-system
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: InfrastructureProvider
metadata:
name: harvester
namespace: caphv-system
spec:
version: v0.1.4
fetchConfig:
url: https://github.com/rancher-sandbox/cluster-api-provider-harvester/releases/download/v0.1.4/components.yaml
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
ccm: external
cluster.x-k8s.io/cluster-name: rke2-mgmt
cni: external
csi: external
name: rke2-mgmt
namespace: default
spec:
controlPlaneRef:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: RKE2ControlPlane
name: rke2-mgmt-control-plane
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterCluster
name: rke2-mgmt-hv
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterCluster
metadata:
name: rke2-mgmt-hv
namespace: default
spec:
identitySecret:
name: hv-identity-secret
namespace: default
loadBalancerConfig:
ipamType: dhcp
listeners:
- backendPort: 9345
name: rke2-server
port: 9345
protocol: TCP
- backendPort: 443
name: rke2-ingress
port: 443
protocol: TCP
server: {{ .Values.harvester_vip }}
targetNamespace: default
---
apiVersion: v1
data:
kubeconfig: {{ .Values.harvester_kubeconfig_b64 }}
kind: Secret
metadata:
name: hv-identity-secret
namespace: default
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: RKE2ControlPlane
metadata:
name: rke2-mgmt-control-plane
namespace: default
spec:
agentConfig:
version: v1.33.5+rke2r1
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
name: rke2-mgmt-cp-machine
namespace: default
replicas: 3
serverConfig:
cni: canal
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1
kind: RKE2ConfigTemplate
metadata:
name: rke2-mgmt-worker
namespace: default
spec:
template:
spec:
agentConfig:
version: v1.33.5+rke2r1
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: rke2-mgmt-workers
namespace: default
spec:
clusterName: rke2-mgmt
replicas: 0
selector:
matchLabels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
template:
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1
kind: RKE2ConfigTemplate
name: rke2-mgmt-worker
namespace: default
clusterName: rke2-mgmt
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
name: rke2-mgmt-wk-machine
namespace: default
version: v1.29.6+rke2r1
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
metadata:
name: rke2-mgmt-wk-machine
namespace: default
spec:
template:
spec:
cpu: 2
memory: 16Gi
networks:
- {{ .Values.vm_network_name }}
sshKeyPair: default/{{ .Values.ssh_keypair }}
sshUser: {{ .Values.vm_default_user }}
volumes:
- bootOrder: 0
imageName: default/{{ .Values.vm_image_name }}
volumeSize: 40Gi
volumeType: image
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: HarvesterMachineTemplate
metadata:
name: rke2-mgmt-cp-machine
namespace: default
spec:
template:
spec:
cpu: 2
memory: 16Gi
networks:
- {{ .Values.vm_network_name }}
sshKeyPair: default/{{ .Values.ssh_keypair }}
sshUser: {{ .Values.vm_default_user }}
volumes:
- bootOrder: 0
imageName: default/{{ .Values.vm_image_name }}
volumeSize: 40Gi
volumeType: image
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
labels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
name: rke2-mgmt-rancher-crs-0
namespace: default
spec:
clusterSelector:
matchLabels:
cluster.x-k8s.io/cluster-name: rke2-mgmt
resources:
- kind: Secret
name: rancher-namespace
- kind: Secret
name: rancher-helmchart
- kind: Secret
name: certmanager-helmchart
strategy: Reconcile
---
apiVersion: v1
kind: Secret
metadata:
name: certmanager-helmchart
namespace: default
stringData:
data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: cert-manager\n
\ namespace: default \nspec:\n bootstrap: true\n targetNamespace: cert-manager\n
\ createNamespace: true\n valuesContent: |-\n securityContext:\n runAsNonRoot:
true\n crds:\n enabled: true\n version: v1.16.1\n repo: https://charts.jetstack.io\n
\ chart: cert-manager\n"
type: addons.cluster.x-k8s.io/resource-set
---
apiVersion: v1
kind: Secret
metadata:
name: rancher-helmchart
namespace: default
stringData:
data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: rancher\n
\ namespace: default \nspec:\n bootstrap: false\n targetNamespace: cattle-system\n
\ createNamespace: true\n set:\n hostname: {{ .Values.rancher_url }}\n
\ replicas: 3\n bootstrapPassword: admin\n valuesContent: |-\n global:\n
\ cattle:\n psp:\n enabled: false\n ingress:\n tls:\n
\ source: rancher\n repo: https://releases.rancher.com/server-charts/latest\n
\ chart: rancher\n version: v2.12.3\n"
type: addons.cluster.x-k8s.io/resource-set
sync:
fromHost:
ingressClasses:
enabled: true
toHost:
ingresses:
enabled: true


@@ -0,0 +1,475 @@
apiVersion: fleet.cattle.io/v1alpha1
kind: Bundle
metadata:
name: mgmt-cluster
namespace: fleet-local
spec:
helm:
chart: ./rke2
# releaseName: rke2-mgmt
values:
cluster_name: rke2-mgmt
control_plane:
cpu_count: 8
files:
- content: |
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
namespace: default
spec:
bootstrap: true
targetNamespace: cert-manager
createNamespace: true
valuesContent: |-
securityContext:
runAsNonRoot: true
crds:
enabled: true
version: v1.16.1
repo: https://charts.jetstack.io
chart: cert-manager
owner: root
path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml
- content: |
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: rancher
namespace: default
spec:
bootstrap: false
targetNamespace: cattle-system
createNamespace: true
set:
hostname: rancher-mgmt.product.lan
replicas: 3
bootstrapPassword: admin
valuesContent: |-
global:
cattle:
psp:
enabled: false
ingress:
tls:
source: rancher
repo: https://releases.rancher.com/server-charts/stable
chart: rancher
version: v2.12.3
owner: root
path: /var/lib/rancher/rke2/server/manifests/rancher.yaml
ipam: dhcp
loadbalancer_gateway: 172.27.27.1
loadbalancer_subnet: 172.27.27.0/24
memory_gb: 16
network:
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: yes
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: yes
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: yes
node_count: 3
vip: 172.27.27.40
network_name: k8s-network
registry_config:
configs:
rgcrprod.azurecr.us:
auth:
password: test
username: test
rke2_version: v1.33.4+rke2r1
ssh_pub_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil
system_default_registry: ""
vm:
airgapped_image: false
image: ubuntu-22.04
qemu_agent_enable: true
qemu_agent_install: true
worker:
node_count: 0
storage:
class: longhorn-image-99dd5 # StorageClass for image ubuntu-22.04
valuesFiles:
- values.yaml
resources:
- content: |2-
helm:
chart: ./rke2
releaseName: rke2-mgmt
valuesFiles:
- values.yaml
name: fleet.yaml
- content: |-
apiVersion: v2
name: rke2-cluster
description: RKE2 cluster designed for usage directly on Harvester
type: application
version: 0.1.1
appVersion: 0.1.1
name: rke2/Chart.yaml
- content: "{{- range $i := until (.Values.control_plane.node_count | int) }}\n---\napiVersion:
v1\nkind: Secret\nmetadata:\n name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit\n
\ namespace: {{ $.Values.cluster_namespace }}\nstringData:\n userdata: |\n
\ #cloud-config\n {{- if $.Values.vm.qemu_agent_install }}\n package_update:
true\n packages:\n - qemu-guest-agent\n {{- end }}\n write_files:
\n {{- if $.Values.control_plane.files }}\n{{ $.Values.control_plane.files
| toYaml | indent 4 }}\n {{- end }}\n - path: /etc/rancher/rke2/config.yaml\n
\ owner: root\n content: |\n token: {{ $.Values.shared_token
}}\n {{- if ne $i 0 }}\n server: https://{{ $.Values.control_plane.vip
}}:9345\n {{- end }}\n system-default-registry: {{ $.Values.system_default_registry
}}\n tls-san:\n - {{ $.Values.cluster_name }}-cp-{{ $i }}\n
\ - {{ $.Values.control_plane.vip }}\n secrets-encryption: true\n
\ write-kubeconfig-mode: 0640\n use-service-account-credentials:
true\n {{- if hasKey $.Values \"registry_config\" }}\n - path: /etc/rancher/rke2/registries.yaml\n
\ owner: root\n content: |-\n{{ $.Values.registry_config | toYaml |
indent 8 }}\n {{- end }}\n - path: /etc/hosts\n owner: root\n content:
|\n 127.0.0.1 localhost\n 127.0.0.1 {{$.Values.cluster_name }}-cp-{{
$i }}\n runcmd:\n {{- if $.Values.vm.qemu_agent_enable }}\n - - systemctl\n
\ - enable\n - '--now'\n - qemu-guest-agent.service\n {{- end
}}\n {{- if not $.Values.vm.airgapped_image }}\n - mkdir -p /var/lib/rancher/rke2-artifacts
&& wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh\n
\ {{- end}}\n - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} /var/lib/rancher/install.sh\n
\ - systemctl enable rke2-server.service\n - useradd -r -c \"etcd user\"
-s /sbin/nologin -M etcd -U\n - systemctl start rke2-server.service\n ssh_authorized_keys:
\n - {{ $.Values.ssh_pub_key }}\n {{- if ne $.Values.control_plane.ipam
\"dhcp\" }}\n {{- if hasKey $.Values.control_plane \"network\" }}\n networkdata:
|\n{{ index $.Values.control_plane.network $i | indent 4 }}\n {{- end}}\n {{-
else}}\n networkdata: \"\"\n {{- end}}\n{{- end}}"
name: rke2/templates/rke2_cp_secret.yaml
- content: |-
{{- range $i := until (.Values.control_plane.node_count | int) }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ $.Values.cluster_name }}-cp-disk-{{ $i }}
namespace: {{ $.Values.cluster_namespace }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ $.Values.control_plane.node_disk_gb }}Gi
storageClassName: {{ $.Values.storage.class }}
volumeMode: Block
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
namespace: {{ $.Values.cluster_namespace }}
annotations:
# harvesterhci.io/volumeClaimTemplates: |
# [{"metadata":{"name":"{{ $.Values.cluster_name }}-cp-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"{{ $.Values.control_plane.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
# network.harvesterhci.io/ips: '[]'
labels:
harvesterhci.io/creator: harvester
harvesterhci.io/os: {{ $.Values.vm.os }}
name: {{ $.Values.cluster_name }}-cp-{{ $i }}
finalizers:
- harvesterhci.io/VMController.UnsetOwnerOfPVCs
spec:
runStrategy: RerunOnFailure
template:
metadata:
annotations: {}
labels:
harvesterhci.io/vmName: {{ $.Values.cluster_name }}-cp-{{ $i }}
spec:
domain:
machine:
type: ''
cpu:
cores: {{ $.Values.control_plane.cpu_count }}
sockets: 1
threads: 1
devices:
interfaces:
- bridge: {}
model: virtio
name: default
disks:
- name: disk-0
disk:
bus: virtio
bootOrder: 1
- name: cloudinitdisk
disk:
bus: virtio
hostDevices: []
resources:
limits:
memory: {{ $.Values.control_plane.memory_gb }}Gi
cpu: {{ $.Values.control_plane.cpu_count }}
features:
acpi:
enabled: {{ $.Values.vm.uefi_enabled }}
firmware:
bootloader:
efi:
secureBoot: false
evictionStrategy: LiveMigrate
hostname: {{ $.Values.cluster_name }}-cp-{{ $i }}
networks:
- name: default
multus:
networkName: default/{{ $.Values.network_name }}
volumes:
- name: disk-0
persistentVolumeClaim:
claimName: {{ $.Values.cluster_name }}-cp-disk-{{ $i }}
- name: cloudinitdisk
cloudInitNoCloud:
secretRef:
name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit
networkDataSecretRef:
name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit
affinity: {}
terminationGracePeriodSeconds: 120
{{- end }}
name: rke2/templates/rke2_cp_vm.yaml
- content: |-
---
apiVersion: loadbalancer.harvesterhci.io/v1beta1
kind: IPPool
metadata:
name: {{ $.Values.cluster_name }}-pool
spec:
ranges:
- gateway: {{ .Values.control_plane.loadbalancer_gateway }}
rangeEnd: {{ .Values.control_plane.vip }}
rangeStart: {{ .Values.control_plane.vip }}
subnet: {{ .Values.control_plane.loadbalancer_subnet }}
selector: {}
---
apiVersion: loadbalancer.harvesterhci.io/v1beta1
kind: LoadBalancer
metadata:
name: {{ .Values.cluster_name }}-lb
namespace: default
spec:
healthCheck:
failureThreshold: 2
port: 6443
successThreshold: 3
timeoutSeconds: 5
periodSeconds: 5
ipam: pool
ipPool: {{ .Values.cluster_name }}-pool
listeners:
- name: k8s-api
port: 6443
protocol: TCP
backendPort: 6443
- name: ingress
port: 443
protocol: TCP
backendPort: 443
- name: join
port: 9345
protocol: TCP
backendPort: 9345
workloadType: vm
backendServerSelector:
harvesterhci.io/vmName:
{{- range $i := until (.Values.control_plane.node_count | int)}}
- {{ $.Values.cluster_name }}-cp-{{ $i }}
{{- end}}
name: rke2/templates/rke2_lb.yaml
- content: "{{- range $i := until (.Values.worker.node_count | int) }}\n---\napiVersion:
v1\nkind: Secret\nmetadata:\n name: {{ $.Values.cluster_name }}-worker-{{ $i
}}-cloudinit\n namespace: {{ $.Values.cluster_namespace }}\nstringData:\n userdata:
|\n #cloud-config\n {{- if $.Values.vm.qemu_agent_install }}\n package_update:
true\n packages:\n - qemu-guest-agent\n {{- end }}\n write_files:
\n {{- if $.Values.worker.files }}\n{{ $.Values.worker.files | toYaml | indent
4 }}\n {{- end }}\n - path: /etc/rancher/rke2/config.yaml\n owner:
root\n content: |\n token: {{ $.Values.shared_token }}\n {{-
if ne $i 0 }}\n server: https://{{ $.Values.control_plane.vip }}:9345\n
\ {{- end }}\n system-default-registry: {{ $.Values.system_default_registry
}}\n secrets-encryption: true\n write-kubeconfig-mode: 0640\n
\ use-service-account-credentials: true\n {{- if hasKey $.Values \"registry_config\"
}}\n - path: /etc/rancher/rke2/registries.yaml\n owner: root\n content:
|-\n{{ $.Values.registry_config | toYaml | indent 8 }}\n {{- end }}\n -
path: /etc/hosts\n owner: root\n content: |\n 127.0.0.1 localhost\n
\ 127.0.0.1 {{$.Values.cluster_name }}-worker-{{ $i }}\n runcmd:\n
\ {{- if $.Values.vm.qemu_agent_enable }}\n - - systemctl\n - enable\n
\ - '--now'\n - qemu-guest-agent.service\n {{- end }}\n {{- if
not $.Values.vm.airgapped_image }}\n - mkdir -p /var/lib/rancher/rke2-artifacts
&& wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh\n
\ {{- end}}\n - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} INSTALL_RKE2_TYPE=\"agent\"
/var/lib/rancher/install.sh\n - systemctl enable rke2-server.service\n -
systemctl start rke2-server.service\n ssh_authorized_keys: \n - {{ $.Values.ssh_pub_key
}}\n {{- if ne $.Values.worker.ipam \"dhcp\" }}\n {{- if hasKey $.Values.worker
\"network\" }}\n networkdata: |\n{{ index $.Values.worker.network $i | indent
4 }}\n {{- end}}\n {{- else}}\n networkdata: \"\"\n {{- end}}\n{{- end}}"
name: rke2/templates/rke2_worker_secret.yaml
- content: |-
{{- range $i := until (.Values.worker.node_count | int) }}
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
namespace: {{ $.Values.cluster_namespace }}
annotations:
harvesterhci.io/volumeClaimTemplates: |
[{"metadata":{"name":"{{ $.Values.cluster_name }}-worker-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"{{ $.Values.worker.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
network.harvesterhci.io/ips: '[]'
labels:
harvesterhci.io/creator: harvester
harvesterhci.io/os: {{ $.Values.vm.os }}
name: {{ $.Values.cluster_name }}-worker-{{ $i }}
finalizers:
- harvesterhci.io/VMController.UnsetOwnerOfPVCs
spec:
runStrategy: RerunOnFailure
template:
metadata:
annotations: {}
labels:
harvesterhci.io/vmName: {{ $.Values.cluster_name }}-worker-{{ $i }}
spec:
domain:
machine:
type: ''
cpu:
cores: {{ $.Values.worker.cpu_count }}
sockets: 1
threads: 1
devices:
interfaces:
- bridge: {}
model: virtio
name: default
disks:
- name: disk-0
disk:
bus: virtio
bootOrder: 1
- name: cloudinitdisk
disk:
bus: virtio
hostDevices: []
resources:
limits:
memory: {{ $.Values.worker.memory_gb }}Gi
cpu: {{ $.Values.worker.cpu_count }}
features:
acpi:
enabled: {{ $.Values.vm.uefi_enabled }}
firmware:
bootloader:
efi:
secureBoot: false
evictionStrategy: LiveMigrate
hostname: {{ $.Values.cluster_name }}-worker-{{ $i }}
networks:
- name: default
multus:
networkName: default/{{ $.Values.network_name }}
volumes:
- name: disk-0
persistentVolumeClaim:
claimName: {{ $.Values.cluster_name }}-worker-disk-{{ $i }}
- name: cloudinitdisk
cloudInitNoCloud:
secretRef:
name: {{ $.Values.cluster_name }}-worker-{{ $i }}-cloudinit
networkData: ""
affinity: {}
terminationGracePeriodSeconds: 120
{{- end }}
name: rke2/templates/rke2_worker_vm.yaml
- content: "cluster_name: mycluster\ncluster_namespace: default\n\nshared_token:
insecuretoken\nsystem_default_registry: \"\" #! empty value: use embedded
default\n #! non-empty value: use as regsitry
to source rke2 runtime image from\n #! if your
VM image contains the tarballs for RKE2, it will use those first\nrke2_version:
v1.26.10+rke2r2\n\nssh_pub_key: \"\" #! the public ssh key
to inject onto each node, required if you want to fetch a kubeconfig\n\n# registry_config:\n#
\ configs:\n# \"rgcrprod.azurecr.us\":\n# auth:\n# username:
test\n# password: test\n\nvm:\n image_namespace: default #!
namespace in your harvester cluster containing the vm base image\n image: ubuntu
\ #! name of base vm image to use for your RKE2 nodes\n os:
linux\n distro: ubuntu #! flag used for specific cloud-init
code tied to Ubuntu vs others (netplan)\n uefi_enabled: true\n qemu_agent_install:
true #! flag for installation of the qemu-agent service (Requires internet)\n
\ qemu_agent_enable: true #! flag for enabling the qemu-agent\n airgapped_image:
false #! flag to alert helm that your VM image already has the RKE2
install script (and does not need to download it)\n\n\nnetwork_name: host\n\ncontrol_plane:\n
\ node_count: 1\n cpu_count: 4\n memory_gb: 8\n node_disk_gb: 40\n loadbalancer_gateway:
10.10.0.1\n loadbalancer_subnet: 10.10.0.0/24\n files: []\n # files:\n #
- path: /tmp/test\n # owner: root\n # content: |\n # created a file\n\n
\ vip: #! this is the VIP for the Harvester LoadBalancer
object, ensure it is a routable IP\n ipam: dhcp #! this
can be dhcp or static, static requires an equal amount of cloud-init network-data
entries\n\n # network:\n # - | #! ubuntu example\n # network:\n # version:
2\n # renderer: networkd\n # ethernets:\n # enp1s0:\n # dhcp4:
no\n # addresses: [ \"10.10.0.6/24\" ]\n # gateway4: 10.10.0.1\n
\ # nameservers:\n # addresses: \n # - 10.10.0.1\n\nworker:\n
\ node_count: 1\n cpu_count: 4\n memory_gb: 8\n node_disk_gb: 40\n files:
[]\n # files:\n # - path: /tmp/test\n # owner: root\n # content: |\n
\ # created a file\n\n ipam: dhcp #! this can be dhcp
or static, static requires an equal amount of cloud-init network-data entries\n\n
\ # network:\n # - |\n # network:\n # version: 2\n # renderer:
networkd\n # ethernets:\n # enp1s0:\n # dhcp4: no\n #
\ addresses: [ \"10.10.0.20/24\" ]\n # gateway4: 10.10.0.1\n
\ # nameservers:\n # addresses: \n # - 10.10.0.1\n"
name: rke2/values.yaml
- content: "cluster_name: rke2-mgmt\nsystem_default_registry: \"\"\n\nrke2_version:
v1.29.6+rke2r1\n\nvm:\n image: ubuntu\n qemu_agent_install: true \n
\ qemu_agent_enable: true \n airgapped_image: false \nnetwork_name:
lab-workload\nssh_pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDK3vpDMfNKbXTkpOwC77k5xvEpUAcNyJa6pYT17YMbzvHuugSJRiouLElDtpqktto6plkDdrTRXnkDA3aVxUycCl/4IrnCEehlg8LGgFxKASNeXQYL0URIWRDicyJaolg5bFdcu3gYTA0JBtApiebrml6bj9cJGnS8lqRK9wmWIFv5lPICcrZMsw1AIRhghGI5BupUnttD+muHspAiGfjTbiiCvKo3fLmEMQ9pt/46wQuPbzOCVChpJByVG9AKO9IpdkOGgKeuy2y98ZxJIHBAx4B49jDfA8NNfyEBIdgiIvlv6QXgjFbazI5buLYM/RK36kf9JjYNBySZJuA3VMbHnWmWvZYBQUA6ypVMc4Wzvd3hhFmNQn1W+NEHl6v+bCDeo5QIv5dkpIoDgJd8CvWQ42bb2bi7zyO32v2zfaW03eDCeopFAKditMPhjqai0S2W4LRt7dRKEOCvUqPFYqZ99nBk1mmTWG8Gpp7VA/+shn171Yc/wDCwBcEyciqOYNtnW55O3eCiBHsnBcEFKy80zHJ1jckDSluypwBrsooYV5WKS8O+jqGyYfdruJ8oUCPw72b0JHs5AmFCRuhzOU6cZP6Ynghs1SkdVtq722uFjmDUR0X8+hoIZDEWutw6+91YhwnodA3MmGHtInlY+URqdz6TltOMP2X2vSMohnh2zQ==\n\nregistry_config:\n
\ configs:\n \"rgcrprod.azurecr.us\":\n auth:\n username: test\n
\ password: test\n\ncontrol_plane:\n vip: 10.2.0.20 \n loadbalancer_gateway:
10.2.0.1\n loadbalancer_subnet: 10.2.0.0/24\n \n node_count: 3 \n cpu_count:
8\n memory_gb: 16\n\n ipam: static\n network:\n - |\n network:\n version:
2\n renderer: networkd\n ethernets:\n enp1s0:\n dhcp4:
no\n addresses: [ \"10.2.0.21/24\" ]\n gateway4: 10.2.0.1\n
\ nameservers:\n addresses: \n - 10.2.0.1\n -
|\n network:\n version: 2\n renderer: networkd\n ethernets:\n
\ enp1s0:\n dhcp4: no\n addresses: [ \"10.2.0.22/24\"
]\n gateway4: 10.2.0.1\n nameservers:\n addresses:
\n - 10.2.0.1\n - |\n network:\n version: 2\n renderer:
networkd\n ethernets:\n enp1s0:\n dhcp4: no\n addresses:
[ \"10.2.0.23/24\" ]\n gateway4: 10.2.0.1\n nameservers:\n
\ addresses: \n - 10.2.0.1\n files:\n - path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml\n
\ owner: root\n content: |\n apiVersion: helm.cattle.io/v1\n kind:
HelmChart\n metadata:\n name: cert-manager\n namespace: default
\ \n spec:\n bootstrap: true\n targetNamespace: cert-manager\n
\ createNamespace: true\n valuesContent: |-\n securityContext:\n
\ runAsNonRoot: true\n crds:\n enabled: true\n
\ version: v1.16.1\n repo: https://charts.jetstack.io\n chart:
cert-manager\n - path: /var/lib/rancher/rke2/server/manifests/rancher.yaml\n
\ owner: root\n content: |\n apiVersion: helm.cattle.io/v1\n kind:
HelmChart\n metadata:\n name: rancher\n namespace: default
\ \n spec:\n bootstrap: false\n targetNamespace: cattle-system\n
\ createNamespace: true\n set:\n hostname: rancher.lab.sienarfleet.systems\n
\ replicas: 3\n bootstrapPassword: admin\n valuesContent:
|-\n global:\n cattle:\n psp:\n enabled:
false\n ingress:\n tls:\n source: rancher\n
\ repo: https://releases.rancher.com/server-charts/stable\n chart:
rancher\n version: v2.10.1\nworker:\n node_count: 0"
name: values.yaml
targetRestrictions:
- clusterName: local
targets:
- clusterName: local
ignore: {}


@@ -0,0 +1,493 @@
apiVersion: fleet.cattle.io/v1alpha1
kind: Bundle
metadata:
name: mgmt-cluster
namespace: fleet-local
spec:
helm:
chart: ./rke2
# releaseName: rke2-mgmt
values:
cluster_name: rke2-mgmt
control_plane:
cpu_count: 8
files:
- content: |
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
namespace: default
spec:
bootstrap: true
targetNamespace: cert-manager
createNamespace: true
valuesContent: |-
securityContext:
runAsNonRoot: true
crds:
enabled: true
version: v1.16.1
repo: https://charts.jetstack.io
chart: cert-manager
owner: root
path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml
- content: |
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: rancher
namespace: default
spec:
bootstrap: false
targetNamespace: cattle-system
createNamespace: true
set:
hostname: rancher-mgmt.product.lan
replicas: 3
bootstrapPassword: admin
valuesContent: |-
global:
cattle:
psp:
enabled: false
ingress:
tls:
source: rancher
repo: https://releases.rancher.com/server-charts/stable
chart: rancher
version: v2.12.3
owner: root
path: /var/lib/rancher/rke2/server/manifests/rancher.yaml
ipam: static
loadbalancer_gateway: 172.27.27.1
loadbalancer_subnet: 172.27.27.0/24
memory_gb: 16
network:
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: no
addresses: [ "172.22.19.41/24" ]
gateway4: 172.22.19.1
nameservers:
addresses:
- 172.22.19.15
- 172.22.19.16
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: no
addresses: [ "172.22.19.42/24" ]
gateway4: 172.22.19.1
nameservers:
addresses:
- 172.22.19.15
- 172.22.19.16
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: no
addresses: [ "172.22.19.43/24" ]
gateway4: 172.22.19.1
nameservers:
addresses:
- 172.22.19.15
- 172.22.19.16
node_count: 3
vip: 172.27.27.40
network_name: k8s-network
registry_config:
configs:
rgcrprod.azurecr.us:
auth:
password: test
username: test
rke2_version: v1.33.4+rke2r1
ssh_pub_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil
system_default_registry: ""
vm:
airgapped_image: false
image: ubuntu-22.04
qemu_agent_enable: true
qemu_agent_install: true
worker:
node_count: 0
storage:
class: longhorn-image-99dd5 # StorageClass for image ubuntu-22.04
valuesFiles:
- values.yaml
resources:
- content: |2-
helm:
chart: ./rke2
releaseName: rke2-mgmt
valuesFiles:
- values.yaml
name: fleet.yaml
- content: |-
apiVersion: v2
name: rke2-cluster
description: RKE2 cluster designed for usage directly on Harvester
type: application
version: 0.1.1
appVersion: 0.1.1
name: rke2/Chart.yaml
- content: "{{- range $i := until (.Values.control_plane.node_count | int) }}\n---\napiVersion:
v1\nkind: Secret\nmetadata:\n name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit\n
\ namespace: {{ $.Values.cluster_namespace }}\nstringData:\n userdata: |\n
\ #cloud-config\n {{- if $.Values.vm.qemu_agent_install }}\n package_update:
true\n packages:\n - qemu-guest-agent\n {{- end }}\n write_files:
\n {{- if $.Values.control_plane.files }}\n{{ $.Values.control_plane.files
| toYaml | indent 4 }}\n {{- end }}\n - path: /etc/rancher/rke2/config.yaml\n
\ owner: root\n content: |\n token: {{ $.Values.shared_token
}}\n {{- if ne $i 0 }}\n server: https://{{ $.Values.control_plane.vip
}}:9345\n {{- end }}\n system-default-registry: {{ $.Values.system_default_registry
}}\n tls-san:\n - {{ $.Values.cluster_name }}-cp-{{ $i }}\n
\ - {{ $.Values.control_plane.vip }}\n secrets-encryption: true\n
\ write-kubeconfig-mode: 0640\n use-service-account-credentials:
true\n {{- if hasKey $.Values \"registry_config\" }}\n - path: /etc/rancher/rke2/registries.yaml\n
\ owner: root\n content: |-\n{{ $.Values.registry_config | toYaml |
indent 8 }}\n {{- end }}\n - path: /etc/hosts\n owner: root\n content:
|\n 127.0.0.1 localhost\n 127.0.0.1 {{$.Values.cluster_name }}-cp-{{
$i }}\n runcmd:\n {{- if $.Values.vm.qemu_agent_enable }}\n - - systemctl\n
\ - enable\n - '--now'\n - qemu-guest-agent.service\n {{- end
}}\n {{- if not $.Values.vm.airgapped_image }}\n - mkdir -p /var/lib/rancher/rke2-artifacts
&& wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh\n
\ {{- end}}\n - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} /var/lib/rancher/install.sh\n
\ - systemctl enable rke2-server.service\n - useradd -r -c \"etcd user\"
-s /sbin/nologin -M etcd -U\n - systemctl start rke2-server.service\n ssh_authorized_keys:
\n - {{ $.Values.ssh_pub_key }}\n {{- if ne $.Values.control_plane.ipam
\"dhcp\" }}\n {{- if hasKey $.Values.control_plane \"network\" }}\n networkdata:
|\n{{ index $.Values.control_plane.network $i | indent 4 }}\n {{- end}}\n {{-
else}}\n networkdata: \"\"\n {{- end}}\n{{- end}}"
name: rke2/templates/rke2_cp_secret.yaml
- content: |-
{{- range $i := until (.Values.control_plane.node_count | int) }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ $.Values.cluster_name }}-cp-disk-{{ $i }}
namespace: {{ $.Values.cluster_namespace }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ $.Values.control_plane.node_disk_gb }}Gi
storageClassName: {{ $.Values.storage.class }}
volumeMode: Block
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
namespace: {{ $.Values.cluster_namespace }}
annotations:
# harvesterhci.io/volumeClaimTemplates: |
# [{"metadata":{"name":"{{ $.Values.cluster_name }}-cp-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"{{ $.Values.control_plane.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
# network.harvesterhci.io/ips: '[]'
labels:
harvesterhci.io/creator: harvester
harvesterhci.io/os: {{ $.Values.vm.os }}
name: {{ $.Values.cluster_name }}-cp-{{ $i }}
finalizers:
- harvesterhci.io/VMController.UnsetOwnerOfPVCs
spec:
runStrategy: RerunOnFailure
template:
metadata:
annotations: {}
labels:
harvesterhci.io/vmName: {{ $.Values.cluster_name }}-cp-{{ $i }}
spec:
domain:
machine:
type: ''
cpu:
cores: {{ $.Values.control_plane.cpu_count }}
sockets: 1
threads: 1
devices:
interfaces:
- bridge: {}
model: virtio
name: default
disks:
- name: disk-0
disk:
bus: virtio
bootOrder: 1
- name: cloudinitdisk
disk:
bus: virtio
hostDevices: []
resources:
limits:
memory: {{ $.Values.control_plane.memory_gb }}Gi
cpu: {{ $.Values.control_plane.cpu_count }}
features:
acpi:
enabled: {{ $.Values.vm.uefi_enabled }}
firmware:
bootloader:
efi:
secureBoot: false
evictionStrategy: LiveMigrate
hostname: {{ $.Values.cluster_name }}-cp-{{ $i }}
networks:
- name: default
multus:
networkName: default/{{ $.Values.network_name }}
volumes:
- name: disk-0
persistentVolumeClaim:
claimName: {{ $.Values.cluster_name }}-cp-disk-{{ $i }}
- name: cloudinitdisk
cloudInitNoCloud:
secretRef:
name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit
networkDataSecretRef:
name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit
affinity: {}
terminationGracePeriodSeconds: 120
{{- end }}
name: rke2/templates/rke2_cp_vm.yaml
- content: |-
---
apiVersion: loadbalancer.harvesterhci.io/v1beta1
kind: IPPool
metadata:
name: {{ $.Values.cluster_name }}-pool
spec:
ranges:
- gateway: {{ .Values.control_plane.loadbalancer_gateway }}
rangeEnd: {{ .Values.control_plane.vip }}
rangeStart: {{ .Values.control_plane.vip }}
subnet: {{ .Values.control_plane.loadbalancer_subnet }}
selector: {}
---
apiVersion: loadbalancer.harvesterhci.io/v1beta1
kind: LoadBalancer
metadata:
name: {{ .Values.cluster_name }}-lb
namespace: default
spec:
healthCheck:
failureThreshold: 2
port: 6443
successThreshold: 3
timeoutSeconds: 5
periodSeconds: 5
ipam: pool
ipPool: {{ .Values.cluster_name }}-pool
listeners:
- name: k8s-api
port: 6443
protocol: TCP
backendPort: 6443
- name: ingress
port: 443
protocol: TCP
backendPort: 443
- name: join
port: 9345
protocol: TCP
backendPort: 9345
workloadType: vm
backendServerSelector:
harvesterhci.io/vmName:
{{- range $i := until (.Values.control_plane.node_count | int)}}
- {{ $.Values.cluster_name }}-cp-{{ $i }}
{{- end}}
name: rke2/templates/rke2_lb.yaml
- content: "{{- range $i := until (.Values.worker.node_count | int) }}\n---\napiVersion:
v1\nkind: Secret\nmetadata:\n name: {{ $.Values.cluster_name }}-worker-{{ $i
}}-cloudinit\n namespace: {{ $.Values.cluster_namespace }}\nstringData:\n userdata:
|\n #cloud-config\n {{- if $.Values.vm.qemu_agent_install }}\n package_update:
true\n packages:\n - qemu-guest-agent\n {{- end }}\n write_files:
\n {{- if $.Values.worker.files }}\n{{ $.Values.worker.files | toYaml | indent
4 }}\n {{- end }}\n - path: /etc/rancher/rke2/config.yaml\n owner:
root\n content: |\n token: {{ $.Values.shared_token }}\n {{-
if ne $i 0 }}\n server: https://{{ $.Values.control_plane.vip }}:9345\n
\ {{- end }}\n system-default-registry: {{ $.Values.system_default_registry
}}\n secrets-encryption: true\n write-kubeconfig-mode: 0640\n
\ use-service-account-credentials: true\n {{- if hasKey $.Values \"registry_config\"
}}\n - path: /etc/rancher/rke2/registries.yaml\n owner: root\n content:
|-\n{{ $.Values.registry_config | toYaml | indent 8 }}\n {{- end }}\n -
path: /etc/hosts\n owner: root\n content: |\n 127.0.0.1 localhost\n
\ 127.0.0.1 {{$.Values.cluster_name }}-worker-{{ $i }}\n runcmd:\n
\ {{- if $.Values.vm.qemu_agent_enable }}\n - - systemctl\n - enable\n
\ - '--now'\n - qemu-guest-agent.service\n {{- end }}\n {{- if
not $.Values.vm.airgapped_image }}\n - mkdir -p /var/lib/rancher/rke2-artifacts
&& wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh\n
\ {{- end}}\n - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} INSTALL_RKE2_TYPE=\"agent\"
/var/lib/rancher/install.sh\n - systemctl enable rke2-server.service\n -
systemctl start rke2-server.service\n ssh_authorized_keys: \n - {{ $.Values.ssh_pub_key
}}\n {{- if ne $.Values.worker.ipam \"dhcp\" }}\n {{- if hasKey $.Values.worker
\"network\" }}\n networkdata: |\n{{ index $.Values.worker.network $i | indent
4 }}\n {{- end}}\n {{- else}}\n networkdata: \"\"\n {{- end}}\n{{- end}}"
name: rke2/templates/rke2_worker_secret.yaml
- content: |-
{{- range $i := until (.Values.worker.node_count | int) }}
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
namespace: {{ $.Values.cluster_namespace }}
annotations:
harvesterhci.io/volumeClaimTemplates: |
[{"metadata":{"name":"{{ $.Values.cluster_name }}-worker-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"{{ $.Values.worker.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
network.harvesterhci.io/ips: '[]'
labels:
harvesterhci.io/creator: harvester
harvesterhci.io/os: {{ $.Values.vm.os }}
name: {{ $.Values.cluster_name }}-worker-{{ $i }}
finalizers:
- harvesterhci.io/VMController.UnsetOwnerOfPVCs
spec:
runStrategy: RerunOnFailure
template:
metadata:
annotations: {}
labels:
harvesterhci.io/vmName: {{ $.Values.cluster_name }}-worker-{{ $i }}
spec:
domain:
machine:
type: ''
cpu:
cores: {{ $.Values.worker.cpu_count }}
sockets: 1
threads: 1
devices:
interfaces:
- bridge: {}
model: virtio
name: default
disks:
- name: disk-0
disk:
bus: virtio
bootOrder: 1
- name: cloudinitdisk
disk:
bus: virtio
hostDevices: []
resources:
limits:
memory: {{ $.Values.worker.memory_gb }}Gi
cpu: {{ $.Values.worker.cpu_count }}
features:
acpi:
enabled: {{ $.Values.vm.uefi_enabled }}
firmware:
bootloader:
efi:
secureBoot: false
evictionStrategy: LiveMigrate
hostname: {{ $.Values.cluster_name }}-worker-{{ $i }}
networks:
- name: default
multus:
networkName: default/{{ $.Values.network_name }}
volumes:
- name: disk-0
persistentVolumeClaim:
claimName: {{ $.Values.cluster_name }}-worker-disk-{{ $i }}
- name: cloudinitdisk
cloudInitNoCloud:
secretRef:
name: {{ $.Values.cluster_name }}-worker-{{ $i }}-cloudinit
networkData: ""
affinity: {}
terminationGracePeriodSeconds: 120
{{- end }}
name: rke2/templates/rke2_worker_vm.yaml
- content: "cluster_name: mycluster\ncluster_namespace: default\n\nshared_token:
insecuretoken\nsystem_default_registry: \"\" #! empty value: use embedded
default\n                           #! non-empty value: use as registry
to source rke2 runtime image from\n #! if your
VM image contains the tarballs for RKE2, it will use those first\nrke2_version:
v1.26.10+rke2r2\n\nssh_pub_key: \"\" #! the public ssh key
to inject onto each node, required if you want to fetch a kubeconfig\n\n# registry_config:\n#
\ configs:\n# \"rgcrprod.azurecr.us\":\n# auth:\n# username:
test\n# password: test\n\nvm:\n image_namespace: default #!
namespace in your harvester cluster containing the vm base image\n image: ubuntu
\ #! name of base vm image to use for your RKE2 nodes\n os:
linux\n distro: ubuntu #! flag used for specific cloud-init
code tied to Ubuntu vs others (netplan)\n uefi_enabled: true\n qemu_agent_install:
true #! flag for installation of the qemu-agent service (Requires internet)\n
\ qemu_agent_enable: true #! flag for enabling the qemu-agent\n airgapped_image:
false #! flag to alert helm that your VM image already has the RKE2
install script (and does not need to download it)\n\n\nnetwork_name: host\n\ncontrol_plane:\n
\ node_count: 1\n cpu_count: 4\n memory_gb: 8\n node_disk_gb: 40\n loadbalancer_gateway:
10.10.0.1\n loadbalancer_subnet: 10.10.0.0/24\n files: []\n # files:\n #
- path: /tmp/test\n # owner: root\n # content: |\n # created a file\n\n
\ vip: #! this is the VIP for the Harvester LoadBalancer
object, ensure it is a routable IP\n ipam: dhcp #! this
can be dhcp or static, static requires an equal amount of cloud-init network-data
entries\n\n # network:\n # - | #! ubuntu example\n # network:\n # version:
2\n # renderer: networkd\n # ethernets:\n # enp1s0:\n # dhcp4:
no\n # addresses: [ \"10.10.0.6/24\" ]\n # gateway4: 10.10.0.1\n
\ # nameservers:\n # addresses: \n # - 10.10.0.1\n\nworker:\n
\ node_count: 1\n cpu_count: 4\n memory_gb: 8\n node_disk_gb: 40\n files:
[]\n # files:\n # - path: /tmp/test\n # owner: root\n # content: |\n
\ # created a file\n\n ipam: dhcp #! this can be dhcp
or static, static requires an equal amount of cloud-init network-data entries\n\n
\ # network:\n # - |\n # network:\n # version: 2\n # renderer:
networkd\n # ethernets:\n # enp1s0:\n # dhcp4: no\n #
\ addresses: [ \"10.10.0.20/24\" ]\n # gateway4: 10.10.0.1\n
\ # nameservers:\n # addresses: \n # - 10.10.0.1\n"
name: rke2/values.yaml
- content: "cluster_name: rke2-mgmt\nsystem_default_registry: \"\"\n\nrke2_version:
v1.29.6+rke2r1\n\nvm:\n image: ubuntu\n qemu_agent_install: true \n
\ qemu_agent_enable: true \n airgapped_image: false \nnetwork_name:
lab-workload\nssh_pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDK3vpDMfNKbXTkpOwC77k5xvEpUAcNyJa6pYT17YMbzvHuugSJRiouLElDtpqktto6plkDdrTRXnkDA3aVxUycCl/4IrnCEehlg8LGgFxKASNeXQYL0URIWRDicyJaolg5bFdcu3gYTA0JBtApiebrml6bj9cJGnS8lqRK9wmWIFv5lPICcrZMsw1AIRhghGI5BupUnttD+muHspAiGfjTbiiCvKo3fLmEMQ9pt/46wQuPbzOCVChpJByVG9AKO9IpdkOGgKeuy2y98ZxJIHBAx4B49jDfA8NNfyEBIdgiIvlv6QXgjFbazI5buLYM/RK36kf9JjYNBySZJuA3VMbHnWmWvZYBQUA6ypVMc4Wzvd3hhFmNQn1W+NEHl6v+bCDeo5QIv5dkpIoDgJd8CvWQ42bb2bi7zyO32v2zfaW03eDCeopFAKditMPhjqai0S2W4LRt7dRKEOCvUqPFYqZ99nBk1mmTWG8Gpp7VA/+shn171Yc/wDCwBcEyciqOYNtnW55O3eCiBHsnBcEFKy80zHJ1jckDSluypwBrsooYV5WKS8O+jqGyYfdruJ8oUCPw72b0JHs5AmFCRuhzOU6cZP6Ynghs1SkdVtq722uFjmDUR0X8+hoIZDEWutw6+91YhwnodA3MmGHtInlY+URqdz6TltOMP2X2vSMohnh2zQ==\n\nregistry_config:\n
\ configs:\n \"rgcrprod.azurecr.us\":\n auth:\n username: test\n
\ password: test\n\ncontrol_plane:\n vip: 10.2.0.20 \n loadbalancer_gateway:
10.2.0.1\n loadbalancer_subnet: 10.2.0.0/24\n \n node_count: 3 \n cpu_count:
8\n memory_gb: 16\n\n ipam: static\n network:\n - |\n network:\n version:
2\n renderer: networkd\n ethernets:\n enp1s0:\n dhcp4:
no\n addresses: [ \"10.2.0.21/24\" ]\n gateway4: 10.2.0.1\n
\ nameservers:\n addresses: \n - 10.2.0.1\n -
|\n network:\n version: 2\n renderer: networkd\n ethernets:\n
\ enp1s0:\n dhcp4: no\n addresses: [ \"10.2.0.22/24\"
]\n gateway4: 10.2.0.1\n nameservers:\n addresses:
\n - 10.2.0.1\n - |\n network:\n version: 2\n renderer:
networkd\n ethernets:\n enp1s0:\n dhcp4: no\n addresses:
[ \"10.2.0.23/24\" ]\n gateway4: 10.2.0.1\n nameservers:\n
\ addresses: \n - 10.2.0.1\n files:\n - path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml\n
\ owner: root\n content: |\n apiVersion: helm.cattle.io/v1\n kind:
HelmChart\n metadata:\n name: cert-manager\n namespace: default
\ \n spec:\n bootstrap: true\n targetNamespace: cert-manager\n
\ createNamespace: true\n valuesContent: |-\n securityContext:\n
\ runAsNonRoot: true\n crds:\n enabled: true\n
\ version: v1.16.1\n repo: https://charts.jetstack.io\n chart:
cert-manager\n - path: /var/lib/rancher/rke2/server/manifests/rancher.yaml\n
\ owner: root\n content: |\n apiVersion: helm.cattle.io/v1\n kind:
HelmChart\n metadata:\n name: rancher\n namespace: default
\ \n spec:\n bootstrap: false\n targetNamespace: cattle-system\n
\ createNamespace: true\n set:\n hostname: rancher.lab.sienarfleet.systems\n
\ replicas: 3\n bootstrapPassword: admin\n valuesContent:
|-\n global:\n cattle:\n psp:\n enabled:
false\n ingress:\n tls:\n source: rancher\n
\ repo: https://releases.rancher.com/server-charts/stable\n chart:
rancher\n version: v2.10.1\nworker:\n node_count: 0"
name: values.yaml
targetRestrictions:
- clusterName: local
targets:
- clusterName: local
ignore: {}
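# The bundle embeds the full rke2 chart inline (fleet.yaml, Chart.yaml, templates and
# values) and targets only the local Fleet cluster.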


@@ -0,0 +1,101 @@
cluster_name: rke2-rancher
cluster_namespace: vanderlande
control_plane:
cpu_count: 4
files:
- content: |
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
#namespace: default
spec:
bootstrap: true
targetNamespace: cert-manager
createNamespace: true
valuesContent: |-
securityContext:
runAsNonRoot: true
crds:
enabled: true
version: v1.16.1
repo: https://charts.jetstack.io
chart: cert-manager
owner: root
path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml
- content: |
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: rancher
#namespace: default
spec:
bootstrap: false
targetNamespace: cattle-system
createNamespace: true
set:
hostname: rancher-mgmt.product.lan
replicas: 3
bootstrapPassword: admin
valuesContent: |-
global:
cattle:
psp:
enabled: false
ingress:
tls:
source: rancher
repo: https://releases.rancher.com/server-charts/stable
chart: rancher
version: v2.12.3
owner: root
path: /var/lib/rancher/rke2/server/manifests/rancher.yaml
ipam: dhcp
loadbalancer_gateway: 172.27.27.1
loadbalancer_subnet: 172.27.27.0/24
memory_gb: 12
network:
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: yes
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: yes
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: yes
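  # Note: with ipam: dhcp the per-node network entries above are not rendered into the
  # VMs' cloud-init; the chart only emits networkdata when ipam is set to static.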
node_count: 3
vip: 172.27.27.40
network_name: vm-lan
registry_config:
configs:
rgcrprod.azurecr.us:
auth:
password: test
username: test
rke2_version: v1.33.4+rke2r1
ssh_pub_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil
system_default_registry: ""
vm:
airgapped_image: false
image: noble-server-cloudimg-amd64.img
qemu_agent_enable: true
qemu_agent_install: true
worker:
node_count: 0
storage:
class: longhorn-image-t4n82 # StorageClass for image noble-server-cloudimg-amd64.img


@@ -0,0 +1,117 @@
cluster_name: rke2-mgmt
cluster_namespace: vanderlande
control_plane:
cpu_count: 4
files:
- content: |
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
#namespace: default
spec:
bootstrap: true
targetNamespace: cert-manager
createNamespace: true
valuesContent: |-
securityContext:
runAsNonRoot: true
crds:
enabled: true
version: v1.16.1
repo: https://charts.jetstack.io
chart: cert-manager
owner: root
path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml
- content: |
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: rancher
#namespace: default
spec:
bootstrap: false
targetNamespace: cattle-system
createNamespace: true
set:
hostname: rancher-mgmt.product.lan
replicas: 3
bootstrapPassword: admin
valuesContent: |-
global:
cattle:
psp:
enabled: false
ingress:
tls:
source: rancher
repo: https://releases.rancher.com/server-charts/stable
chart: rancher
version: v2.12.3
owner: root
path: /var/lib/rancher/rke2/server/manifests/rancher.yaml
ipam: static
loadbalancer_gateway: 172.27.27.1
loadbalancer_subnet: 172.27.27.0/24
memory_gb: 12
network:
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: no
addresses: [ "172.22.19.41/24" ]
gateway4: 172.22.19.1
nameservers:
addresses:
- 172.22.19.15
- 172.22.19.16
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: no
addresses: [ "172.22.19.42/24" ]
gateway4: 172.22.19.1
nameservers:
addresses:
- 172.22.19.15
- 172.22.19.16
- |
network:
version: 2
renderer: networkd
ethernets:
enp1s0:
dhcp4: no
addresses: [ "172.22.19.43/24" ]
gateway4: 172.22.19.1
nameservers:
addresses:
- 172.22.19.15
- 172.22.19.16
node_count: 3
vip: 172.27.27.40
network_name: vm-lan
registry_config:
configs:
rgcrprod.azurecr.us:
auth:
password: test
username: test
rke2_version: v1.33.4+rke2r1
ssh_pub_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil
system_default_registry: ""
vm:
airgapped_image: false
image: noble-server-cloudimg-amd64.img
qemu_agent_enable: true
qemu_agent_install: true
worker:
node_count: 0
storage:
class: longhorn-image-t4n82 # StorageClass for image noble-server-cloudimg-amd64.img


@@ -0,0 +1,7 @@
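# Chart metadata for the inline RKE2-on-Harvester chart. To inspect the rendered
# manifests locally, something like the following should work (chart path and values
# file are assumptions, adjust to the repository layout):
#   helm template rke2-mgmt ./rke2 -f <values-file>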
apiVersion: v2
name: rke2-cluster
description: RKE2 cluster designed for usage directly on Harvester
type: application
version: 0.1.1
appVersion: 0.1.1


@@ -0,0 +1,69 @@
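# Renders one cloud-init userdata Secret per control-plane node. Node 0 bootstraps the
# cluster; the remaining nodes join through the VIP on port 9345 using the shared token.
# With static IPAM, one networkdata entry per node is expected in control_plane.network.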
{{- range $i := until (.Values.control_plane.node_count | int) }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit
namespace: {{ $.Values.cluster_namespace }}
stringData:
userdata: |
#cloud-config
{{- if $.Values.vm.qemu_agent_install }}
package_update: true
packages:
- qemu-guest-agent
{{- end }}
write_files:
{{- if $.Values.control_plane.files }}
{{ $.Values.control_plane.files | toYaml | indent 4 }}
{{- end }}
- path: /etc/rancher/rke2/config.yaml
owner: root
content: |
token: {{ $.Values.shared_token }}
{{- if ne $i 0 }}
server: https://{{ $.Values.control_plane.vip }}:9345
{{- end }}
system-default-registry: {{ $.Values.system_default_registry }}
tls-san:
- {{ $.Values.cluster_name }}-cp-{{ $i }}
- {{ $.Values.control_plane.vip }}
secrets-encryption: true
write-kubeconfig-mode: 0640
use-service-account-credentials: true
{{- if hasKey $.Values "registry_config" }}
- path: /etc/rancher/rke2/registries.yaml
owner: root
content: |-
{{ $.Values.registry_config | toYaml | indent 8 }}
{{- end }}
- path: /etc/hosts
owner: root
content: |
127.0.0.1 localhost
127.0.0.1 {{$.Values.cluster_name }}-cp-{{ $i }}
runcmd:
{{- if $.Values.vm.qemu_agent_enable }}
- - systemctl
- enable
- '--now'
- qemu-guest-agent.service
{{- end }}
{{- if not $.Values.vm.airgapped_image }}
- mkdir -p /var/lib/rancher/rke2-artifacts && wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh
{{- end}}
- INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} /var/lib/rancher/install.sh
- systemctl enable rke2-server.service
- useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U
- systemctl start rke2-server.service
ssh_authorized_keys:
- {{ $.Values.ssh_pub_key }}
{{- if ne $.Values.control_plane.ipam "dhcp" }}
{{- if hasKey $.Values.control_plane "network" }}
networkdata: |
{{ index $.Values.control_plane.network $i | indent 4 }}
{{- end}}
{{- else}}
networkdata: ""
{{- end}}
{{- end}}


@@ -0,0 +1,89 @@
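# Per control-plane node: a Block-mode PVC on the configured StorageClass plus a KubeVirt
# VirtualMachine wired to the matching *-cloudinit Secret for user data and network data.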
{{- range $i := until (.Values.control_plane.node_count | int) }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ $.Values.cluster_name }}-cp-disk-{{ $i }}
namespace: {{ $.Values.cluster_namespace }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ $.Values.control_plane.node_disk_gb }}Gi
storageClassName: {{ $.Values.storage.class }}
volumeMode: Block
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
namespace: {{ $.Values.cluster_namespace }}
annotations:
# harvesterhci.io/volumeClaimTemplates: |
# [{"metadata":{"name":"{{ $.Values.cluster_name }}-cp-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"{{ $.Values.control_plane.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
# network.harvesterhci.io/ips: '[]'
labels:
harvesterhci.io/creator: harvester
harvesterhci.io/os: {{ $.Values.vm.os }}
name: {{ $.Values.cluster_name }}-cp-{{ $i }}
finalizers:
- harvesterhci.io/VMController.UnsetOwnerOfPVCs
spec:
runStrategy: RerunOnFailure
template:
metadata:
annotations: {}
labels:
harvesterhci.io/vmName: {{ $.Values.cluster_name }}-cp-{{ $i }}
spec:
domain:
machine:
type: ''
cpu:
cores: {{ $.Values.control_plane.cpu_count }}
sockets: 1
threads: 1
devices:
interfaces:
- bridge: {}
model: virtio
name: default
disks:
- name: disk-0
disk:
bus: virtio
bootOrder: 1
- name: cloudinitdisk
disk:
bus: virtio
hostDevices: []
resources:
limits:
memory: {{ $.Values.control_plane.memory_gb }}Gi
cpu: {{ $.Values.control_plane.cpu_count }}
features:
acpi:
enabled: {{ $.Values.vm.uefi_enabled }}
firmware:
bootloader:
efi:
secureBoot: false
evictionStrategy: LiveMigrate
hostname: {{ $.Values.cluster_name }}-cp-{{ $i }}
networks:
- name: default
multus:
networkName: {{ $.Values.cluster_namespace }}/{{ $.Values.network_name }}
volumes:
- name: disk-0
persistentVolumeClaim:
claimName: {{ $.Values.cluster_name }}-cp-disk-{{ $i }}
- name: cloudinitdisk
cloudInitNoCloud:
secretRef:
name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit
networkDataSecretRef:
name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit
affinity: {}
terminationGracePeriodSeconds: 120
{{- end }}


@@ -0,0 +1,46 @@
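# A single-address IPPool pinned to the control-plane VIP and a Harvester LoadBalancer
# forwarding 6443 (Kubernetes API), 9345 (RKE2 join) and 443 (ingress) to the
# control-plane VMs selected by their harvesterhci.io/vmName labels.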
---
apiVersion: loadbalancer.harvesterhci.io/v1beta1
kind: IPPool
metadata:
name: {{ $.Values.cluster_name }}-pool
spec:
ranges:
- gateway: {{ .Values.control_plane.loadbalancer_gateway }}
rangeEnd: {{ .Values.control_plane.vip }}
rangeStart: {{ .Values.control_plane.vip }}
subnet: {{ .Values.control_plane.loadbalancer_subnet }}
selector: {}
---
apiVersion: loadbalancer.harvesterhci.io/v1beta1
kind: LoadBalancer
metadata:
name: {{ .Values.cluster_name }}-lb
#namespace: default
spec:
healthCheck:
failureThreshold: 2
port: 6443
successThreshold: 3
timeoutSeconds: 5
periodSeconds: 5
ipam: pool
ipPool: {{ .Values.cluster_name }}-pool
listeners:
- name: k8s-api
port: 6443
protocol: TCP
backendPort: 6443
- name: ingress
port: 443
protocol: TCP
backendPort: 443
- name: join
port: 9345
protocol: TCP
backendPort: 9345
workloadType: vm
backendServerSelector:
harvesterhci.io/vmName:
{{- range $i := until (.Values.control_plane.node_count | int)}}
- {{ $.Values.cluster_name }}-cp-{{ $i }}
{{- end}}


@@ -0,0 +1,66 @@
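# Renders one cloud-init userdata Secret per worker node; install.sh runs with
# INSTALL_RKE2_TYPE="agent" so each worker joins the control plane through the VIP.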
{{- range $i := until (.Values.worker.node_count | int) }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ $.Values.cluster_name }}-worker-{{ $i }}-cloudinit
namespace: {{ $.Values.cluster_namespace }}
stringData:
userdata: |
#cloud-config
{{- if $.Values.vm.qemu_agent_install }}
package_update: true
packages:
- qemu-guest-agent
{{- end }}
write_files:
{{- if $.Values.worker.files }}
{{ $.Values.worker.files | toYaml | indent 4 }}
{{- end }}
- path: /etc/rancher/rke2/config.yaml
owner: root
content: |
token: {{ $.Values.shared_token }}
server: https://{{ $.Values.control_plane.vip }}:9345
system-default-registry: {{ $.Values.system_default_registry }}
secrets-encryption: true
write-kubeconfig-mode: 0640
use-service-account-credentials: true
{{- if hasKey $.Values "registry_config" }}
- path: /etc/rancher/rke2/registries.yaml
owner: root
content: |-
{{ $.Values.registry_config | toYaml | indent 8 }}
{{- end }}
- path: /etc/hosts
owner: root
content: |
127.0.0.1 localhost
127.0.0.1 {{$.Values.cluster_name }}-worker-{{ $i }}
runcmd:
{{- if $.Values.vm.qemu_agent_enable }}
- - systemctl
- enable
- '--now'
- qemu-guest-agent.service
{{- end }}
{{- if not $.Values.vm.airgapped_image }}
#- mkdir -p /var/lib/rancher/rke2-artifacts && wget https://get.rke2.io -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh
- mkdir -p /var/lib/rancher/rke2-artifacts && wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh
{{- end}}
- INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} INSTALL_RKE2_TYPE="agent" /var/lib/rancher/install.sh
    - systemctl enable rke2-agent.service
    - systemctl start rke2-agent.service
ssh_authorized_keys:
- {{ $.Values.ssh_pub_key }}
{{- if ne $.Values.worker.ipam "dhcp" }}
{{- if hasKey $.Values.worker "network" }}
networkdata: |
{{ index $.Values.worker.network $i | indent 4 }}
{{- end}}
{{- else}}
networkdata: ""
{{- end}}
{{- end}}


@@ -0,0 +1,74 @@
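# Per worker node: a KubeVirt VirtualMachine whose root disk is created by Harvester from
# the volumeClaimTemplates annotation (no explicit PVC object, unlike the control-plane
# template).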
{{- range $i := until (.Values.worker.node_count | int) }}
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
namespace: {{ $.Values.cluster_namespace }}
annotations:
harvesterhci.io/volumeClaimTemplates: |
[{"metadata":{"name":"{{ $.Values.cluster_name }}-worker-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"{{ $.Values.worker.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
network.harvesterhci.io/ips: '[]'
labels:
harvesterhci.io/creator: harvester
harvesterhci.io/os: {{ $.Values.vm.os }}
name: {{ $.Values.cluster_name }}-worker-{{ $i }}
finalizers:
- harvesterhci.io/VMController.UnsetOwnerOfPVCs
spec:
runStrategy: RerunOnFailure
template:
metadata:
annotations: {}
labels:
harvesterhci.io/vmName: {{ $.Values.cluster_name }}-worker-{{ $i }}
spec:
domain:
machine:
type: ''
cpu:
cores: {{ $.Values.worker.cpu_count }}
sockets: 1
threads: 1
devices:
interfaces:
- bridge: {}
model: virtio
name: default
disks:
- name: disk-0
disk:
bus: virtio
bootOrder: 1
- name: cloudinitdisk
disk:
bus: virtio
hostDevices: []
resources:
limits:
memory: {{ $.Values.worker.memory_gb }}Gi
cpu: {{ $.Values.worker.cpu_count }}
features:
acpi:
enabled: {{ $.Values.vm.uefi_enabled }}
firmware:
bootloader:
efi:
secureBoot: false
evictionStrategy: LiveMigrate
hostname: {{ $.Values.cluster_name }}-worker-{{ $i }}
networks:
- name: default
multus:
networkName: {{ $.Values.cluster_namespace }}/{{ $.Values.network_name }}
volumes:
- name: disk-0
persistentVolumeClaim:
claimName: {{ $.Values.cluster_name }}-worker-disk-{{ $i }}
- name: cloudinitdisk
cloudInitNoCloud:
secretRef:
name: {{ $.Values.cluster_name }}-worker-{{ $i }}-cloudinit
networkData: ""
affinity: {}
terminationGracePeriodSeconds: 120
{{- end }}


@@ -0,0 +1,92 @@
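# Chart defaults. A deployment-specific values file (such as the DHCP/static examples
# earlier in this commit) is expected to override at least cluster_name, vip,
# network_name, the storage class and the node counts.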
cluster_name: mycluster
cluster_namespace: default
shared_token: insecuretoken
system_default_registry: "" #! empty value: use embedded default
#! non-empty value: use as registry to source rke2 runtime image from
#! if your VM image contains the tarballs for RKE2, it will use those first
rke2_version: v1.26.10+rke2r2
ssh_pub_key: "" #! the public ssh key to inject onto each node, required if you want to fetch a kubeconfig
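#! e.g. (placeholder user/host): ssh <user>@<node-ip> 'sudo cat /etc/rancher/rke2/rke2.yaml'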
# registry_config:
# configs:
# "rgcrprod.azurecr.us":
# auth:
# username: test
# password: test
storage:
class: longhorn
vm:
image_namespace: default #! namespace in your harvester cluster containing the vm base image
image: ubuntu #! name of base vm image to use for your RKE2 nodes
os: linux
distro: ubuntu #! flag used for specific cloud-init code tied to Ubuntu vs others (netplan)
uefi_enabled: true
qemu_agent_install: true #! flag for installation of the qemu-agent service (Requires internet)
qemu_agent_enable: true #! flag for enabling the qemu-agent
airgapped_image: false #! flag to alert helm that your VM image already has the RKE2 install script (and does not need to download it)
network_name: host
control_plane:
node_count: 1
cpu_count: 4
memory_gb: 8
node_disk_gb: 40
loadbalancer_gateway: 10.10.0.1
loadbalancer_subnet: 10.10.0.0/24
files: []
# files:
# - path: /tmp/test
# owner: root
# content: |
# created a file
vip: #! this is the VIP for the Harvester LoadBalancer object, ensure it is a routable IP
ipam: dhcp #! this can be dhcp or static, static requires an equal amount of cloud-init network-data entries
# network:
# - | #! ubuntu example
# network:
# version: 2
# renderer: networkd
# ethernets:
# enp1s0:
# dhcp4: no
# addresses: [ "10.10.0.6/24" ]
# gateway4: 10.10.0.1
# nameservers:
# addresses:
# - 10.10.0.1
worker:
node_count: 1
cpu_count: 4
memory_gb: 8
node_disk_gb: 40
files: []
# files:
# - path: /tmp/test
# owner: root
# content: |
# created a file
ipam: dhcp #! this can be dhcp or static, static requires an equal amount of cloud-init network-data entries
# network:
# - |
# network:
# version: 2
# renderer: networkd
# ethernets:
# enp1s0:
# dhcp4: no
# addresses: [ "10.10.0.20/24" ]
# gateway4: 10.10.0.1
# nameservers:
# addresses:
# - 10.10.0.1