Drop initial code

This commit is contained in:
Danny Bessems
2026-01-15 09:58:01 +00:00
parent 227d957219
commit 1e7c9ba5cb
228 changed files with 19883 additions and 1 deletion

View File

@@ -0,0 +1,21 @@
# HELM IGNORE OPTIONS:
# Patterns to ignore when building Helm packages.
# Supports shell glob matching, relative path matching, and negation (prefixed with !)
.DS_Store
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
*.swp
*.bak
*.tmp
*.orig
*~
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,22 @@
apiVersion: v2
name: rancher-cluster-templates
version: 0.7.2
appVersion: 0.7.2
type: application
description: Hardened Rancher Cluster Templates by Rancher Government
icon: https://raw.githubusercontent.com/rancherfederal/carbide-docs/main/static/img/carbide-logo.svg
home: https://github.com/rancherfederal
sources:
- https://github.com/rancherfederal/rancher-cluster-templates
maintainers:
- name: Rancher Government
email: support@ranchergovernment.com
url: https://ranchergovernment.com
annotations:
catalog.cattle.io/type: cluster-template
catalog.cattle.io/namespace: fleet-default
classification: UNCLASSIFIED

View File

@@ -0,0 +1,105 @@
# Rancher Cluster Templates Helm Chart
| Type | Chart Version | App Version |
| :---------: | :-----------: | :---------: |
| application | `0.7.2` | `0.7.2` |
⚠️ This project is still in active development. As we continue to develop it, there will be breaking changes. ⚠️
## Supported Providers
### Currently Available
- AWS Commercial
- AWS GovCloud
- Harvester
- Digital Ocean
- VMware vSphere
- Custom
### Pending Validation
- Microsoft Azure
## Installing the Chart
### Helm Install via Repository
```bash
helm repo add cluster-templates https://rancherfederal.github.io/rancher-cluster-templates
helm upgrade -i cluster cluster-templates/rancher-cluster-templates -n fleet-default -f values.yaml
```
### Helm Install via Registry
```bash
helm upgrade -i cluster oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates -n fleet-default -f values.yaml
```
## Helm Chart Deployment Status
```bash
helm status cluster -n fleet-default
```
## Uninstalling the Chart
```bash
helm delete cluster -n fleet-default
```
## Chart/Cluster Secrets Management
### Cloud Credentials
If you do not already have Cloud Credentials created within Rancher Manager, you can create them via `kubectl` with the command(s) below. Eventually, we will move these options into the Helm Chart itself!
#### For AWS Credentials
```bash
# with long-term credentials (accessKey and secretKey)
kubectl create secret -n cattle-global-data generic aws-creds --from-literal=amazonec2credentialConfig-defaultRegion=$REGION --from-literal=amazonec2credentialConfig-accessKey=$ACCESSKEY --from-literal=amazonec2credentialConfig-secretKey=$SECRETKEY
kubectl annotate secret -n cattle-global-data aws-creds provisioning.cattle.io/driver=aws
```
```bash
# with temporary credentials (accessKey, secretKey, and sessionToken)
kubectl create secret -n cattle-global-data generic aws-creds-sts --from-literal=amazonec2credentialConfig-defaultRegion=$REGION --from-literal=amazonec2credentialConfig-accessKey=$ACCESSKEY --from-literal=amazonec2credentialConfig-secretKey=$SECRETKEY --from-literal=amazonec2credentialConfig-sessionToken=$SESSIONTOKEN
kubectl annotate secret -n cattle-global-data aws-creds-sts provisioning.cattle.io/driver=aws
```
#### For Harvester Credentials
```bash
export CLUSTERID=$(kubectl get clusters.management.cattle.io -o=jsonpath='{range .items[?(@.metadata.labels.provider\.cattle\.io=="harvester")]}{.metadata.name}{"\n"}{end}')
kubectl create secret -n cattle-global-data generic harvester-creds --from-literal=harvestercredentialConfig-clusterId=$CLUSTERID --from-literal=harvestercredentialConfig-clusterType=imported --from-file=harvestercredentialConfig-kubeconfigContent=harvester.yaml
kubectl annotate secret -n cattle-global-data harvester-creds provisioning.cattle.io/driver=harvester
```
#### For Digital Ocean Credentials
```bash
kubectl create secret -n cattle-global-data generic digitalocean-creds --from-literal=digitaloceancredentialConfig-accessToken=$TOKEN
kubectl annotate secret -n cattle-global-data digitalocean-creds provisioning.cattle.io/driver=digitalocean
```
#### For VMware vSphere Credentials
```bash
# field names follow Rancher's vmwarevsphere credential driver convention
kubectl create secret -n cattle-global-data generic vsphere-creds --from-literal=vmwarevspherecredentialConfig-vcenter=$VCENTER --from-literal=vmwarevspherecredentialConfig-username=$USERNAME --from-literal=vmwarevspherecredentialConfig-password=$PASSWORD
kubectl annotate secret -n cattle-global-data vsphere-creds provisioning.cattle.io/driver=vmwarevsphere
```
### Registry Credentials
If you are configuring an authenticated registry and do not have Registry Credentials created in the Rancher Manager, you can create them via `kubectl` with the command below:
```bash
kubectl create secret -n fleet-default generic --type kubernetes.io/basic-auth registry-creds --from-literal=username=$USERNAME --from-literal=password=$PASSWORD
```
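Once the secret exists, reference it from your `values.yaml`. Below is a sketch based on this chart's registries template; the registry hostname is illustrative:
```yaml
cluster:
  config:
    registries:
      enabled: true
      configs:
        - name: registry.example.com           # registry hostname (illustrative)
          authConfigSecretName: registry-creds  # the secret created above
          insecureSkipVerify: false
      mirrors:
        - name: docker.io
          endpoints:
            - https://registry.example.com
```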

View File

@@ -0,0 +1,561 @@
# questions:
# - variable: cluster.name
# default: mycluster
# description: 'Specify the name of the cluster'
# label: 'Cluster Name'
# required: true
# type: string
# group: 'General'
# - variable: cloudCredentialSecretName
# default:
# description: 'CloudCredentialName for provisioning cluster'
# label: 'CloudCredential Name'
# type: cloudcredential
# group: 'General'
# - variable: cloudprovider
# default: custom
# description: 'Specify Infrastructure provider for underlying nodes'
# label: 'Infrastructure Provider'
# type: enum
# required: true
# options:
# - amazonec2
# - azure
# - digitalocean
# - elemental
# - harvester
# - vsphere
# - custom
# group: 'General'
# - variable: kubernetesVersion
# default: v1.31.5+rke2r1
# description: 'Specify Kubernetes Version'
# label: 'Kubernetes Version'
# type: enum
# required: true
# options:
# - v1.31.5+rke2r1
# - v1.30.9+rke2r1
# - v1.29.13+rke2r1
# group: 'General'
# - variable: localClusterAuthEndpoint.enabled
# default: false
# label: 'Local Auth Access Endpoint'
# description: 'Enable Local Auth Access Endpoint'
# type: boolean
# group: 'Auth Access Endpoint'
# show_subquestion_if: true
# subquestions:
# - variable: localClusterAuthEndpoint.fqdn
# default:
# description: 'Local Auth Access Endpoint FQDN'
# label: 'Auth Endpoint FQDN'
# type: hostname
# group: 'Auth Access Endpoint'
# - variable: localClusterAuthEndpoint.caCerts
# default:
# label: 'Auth Endpoint Cacerts'
# description: 'Local Auth Access Endpoint CACerts'
# type: multiline
# group: 'Auth Access Endpoint'
# - variable: addons.monitoring.enabled
# default: false
# label: 'Enable Monitoring'
# description: 'Enable Rancher Monitoring'
# type: boolean
# group: 'Monitoring'
# show_subquestion_if: true
# subquestions:
# - variable: monitoring.version
# default:
# label: 'Monitoring Version'
# description: 'Choose the chart version of monitoring. If empty, the latest version will be installed'
# type: string
# group: 'Monitoring'
# - variable: monitoring.values
# default:
# label: 'Monitoring Values'
# description: 'Custom monitoring chart values'
# type: multiline
# group: 'Monitoring'
# - variable: nodepools.0.name
# default:
# description: 'Specify nodepool name'
# type: string
# label: 'Nodepool name'
# required: true
# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental
# group: 'Nodepools'
# - variable: nodepools.0.quantity
# default: 1
# description: 'Specify node count'
# type: int
# required: true
# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental
# label: 'Node count'
# group: 'Nodepools'
# - variable: nodepools.0.etcd
# default: true
# label: etcd
# type: boolean
# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental
# group: 'Nodepools'
# - variable: nodepools.0.worker
# default: true
# label: worker
# type: boolean
# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental
# group: 'Nodepools'
# - variable: nodepools.0.controlplane
# label: controlplane
# default: true
# type: boolean
# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental
# group: 'Nodepools'
# # amazonec2
# - variable: nodepools.0.region
# label: 'Region'
# default: us-east-1
# type: string
# description: 'AWS EC2 Region'
# required: true
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# - variable: nodepools.0.zone
# label: 'Zone'
# default: a
# type: string
# description: 'AWS EC2 Zone'
# required: true
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# - variable: nodepools.0.instanceType
# label: 'Instance Type'
# default: t3a.medium
# type: string
# description: 'AWS instance type'
# required: true
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# - variable: nodepools.0.rootSize
# label: 'Root Disk Size'
# default: 16g
# type: string
# description: 'AWS EC2 root disk size'
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# - variable: nodepools.0.vpcId
# label: 'VPC/SUBNET'
# default: ''
# type: string
# description: 'AWS EC2 vpc ID'
# required: true
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# - variable: nodepools.0.iamInstanceProfile
# label: 'Instance Profile Name'
# default: ''
# type: string
# description: 'AWS EC2 Instance Profile Name'
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# - variable: nodepools.0.ami
# label: 'AMI ID'
# default: ''
# type: string
# description: 'AWS EC2 AMI ID'
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# - variable: nodepools.0.sshUser
# label: 'SSH Username for AMI'
# default: ubuntu
# type: string
# description: 'AWS EC2 SSH Username for AMI'
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# - variable: nodepools.0.createSecurityGroup
# label: 'Create security group'
# default: true
# type: boolean
# description: 'Whether to create the `rancher-nodes` security group. If false, you can provide existing security groups'
# show_if: cloudprovider=amazonec2
# group: 'Nodepools'
# show_subquestion_if: false
# subquestions:
# - variable: nodepools.0.securityGroups
# label: 'Security groups'
# default:
# type: string
# description: 'Using existing security groups'
# group: 'Nodepools'
# # vsphere
# - variable: nodepools.0.vcenter
# label: 'vSphere IP/hostname'
# default: ''
# type: hostname
# description: 'vSphere IP/hostname for vCenter'
# required: true
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.datacenter
# label: 'vSphere Datacenter'
# default: ''
# type: hostname
# description: 'vSphere datacenter for virtual machine'
# required: true
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.datastore
# label: 'vSphere Datastore'
# default: ''
# type: string
# description: 'vSphere datastore for virtual machine'
# required: true
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.datastoreCluster
# label: 'vSphere DatastoreCluster'
# default: ''
# type: string
# description: 'vSphere datastore cluster for virtual machine'
# required: true
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.diskSize
# label: 'Disk Size'
# default: '20480'
# type: string
# description: 'vSphere size of disk for docker VM (in MB)'
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.memorySize
# label: 'Memory Size'
# default: '2048'
# type: string
# description: 'vSphere size of memory for docker VM (in MB)'
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.network
# label: 'Network'
# default: ''
# type: string
# description: 'vSphere network where the virtual machine will be attached'
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.pool
# label: 'Resource Pool'
# default: ''
# type: string
# description: 'vSphere resource pool for docker VM'
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.sshPort
# label: 'SSH Port'
# default: '22'
# type: string
# description: 'If using a non-B2D image you can specify the ssh port'
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.sshUserGroup
# label: 'SSH User Group'
# default: docker:staff
# type: hostname
# description: "If using a non-B2D image the uploaded keys will need chown'ed, defaults to staff e.g. docker:staff"
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.vappIpallocationpolicy
# label: 'IP allocation policy'
# default: ''
# type: enum
# options:
# - dhcp
# - fixed
# - transient
# - fixedAllocated
# description: "'vSphere vApp IP allocation policy. Supported values are: dhcp, fixed, transient and fixedAllocated'"
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# - variable: nodepools.0.vappIpprotocol
# label: 'IP protocol'
# default: ''
# type: enum
# options:
# - IPv4
# - IPv6
# description: "'vSphere vApp IP protocol for this deployment. Supported values are: IPv4 and IPv6'"
# show_if: cloudprovider=vsphere
# group: 'Nodepools'
# # harvester
# - variable: nodepools.0.diskSize
# label: 'Disk Size'
# default: 40
# type: string
# description: 'Size of virtual hard disk in GB'
# show_if: cloudprovider=harvester
# group: 'Nodepools'
# - variable: nodepools.0.diskBus
# label: 'Disk Bus Type'
# default: virtio
# type: string
# description: 'Harvester disk bus type'
# show_if: cloudprovider=harvester
# group: 'Nodepools'
# - variable: nodepools.0.cpuCount
# label: 'CPUs'
# default: 2
# type: string
# description: 'number of CPUs for your VM'
# show_if: cloudprovider=harvester
# group: 'Nodepools'
# - variable: nodepools.0.memorySize
# label: 'Memory Size'
# default: 4
# type: string
# description: 'Memory for VM in GB (available RAM)'
# show_if: cloudprovider=harvester
# group: 'Nodepools'
# - variable: nodepools.0.networkName
# label: 'Network'
# default: default/network-name-1
# type: string
# description: 'Name of vlan network in harvester'
# show_if: cloudprovider=harvester
# group: 'Nodepools'
# - variable: nodepools.0.imageName
# label: 'Name of Image'
# default: default/image-rand
# type: string
# description: 'Name of image in harvester'
# show_if: cloudprovider=harvester
# group: 'Nodepools'
# - variable: nodepools.0.vmNamespace
# label: 'VM Namespace'
# default: default
# type: string
# description: 'namespace to deploy the VM to'
# show_if: cloudprovider=harvester
# group: 'Nodepools'
# - variable: nodepools.0.sshUser
# label: 'SSH User'
# default: ubuntu
# type: string
# description: 'SSH username'
# show_if: cloudprovider=harvester
# group: 'Nodepools'
# # digitalocean
# - variable: nodepools.0.image
# label: 'Image'
# default: ubuntu-20-04-x64
# type: string
# description: 'Digital Ocean Image'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.backups
# label: 'Backup'
# default: false
# type: boolean
# description: 'enable backups for droplet'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.ipv6
# label: 'IPv6'
# default: false
# type: boolean
# description: 'enable ipv6 for droplet'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.monitoring
# label: 'Monitoring'
# default: false
# type: boolean
# description: 'enable monitoring for droplet'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.privateNetworking
# label: 'Private Networking'
# default: false
# type: boolean
# description: 'enable private networking for droplet'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.region
# label: 'Region'
# default: sfo3
# type: string
# description: 'Digital Ocean region'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.size
# label: 'Size'
# default: s-4vcpu-8gb
# type: string
# description: 'Digital Ocean size'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.userdata
# label: 'Userdata'
# default:
# type: multiline
# description: 'File contents for userdata'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.sshPort
# label: 'SSH Port'
# default: 22
# type: string
# description: 'SSH port'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# - variable: nodepools.0.sshUser
# label: 'SSH User'
# default: root
# type: string
# description: 'SSH username'
# show_if: cloudprovider=digitalocean
# group: 'Nodepools'
# # azure
# - variable: nodepools.0.availabilitySet
# label: 'Availability Set'
# default: docker-machine
# type: string
# description: 'Azure Availability Set to place the virtual machine into'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.diskSize
# label: 'Disk Size'
# default: ''
# type: string
# description: 'Disk size if using managed disks (GiB)'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.dns
# label: 'DNS'
# default: ''
# type: string
# description: 'A unique DNS label for the public IP address'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.environment
# label: 'Environment'
# default: AzurePublicCloud
# type: enum
# options:
# - AzurePublicCloud
# - AzureGermanCloud
# - AzureChinaCloud
# - AzureUSGovernmentCloud
# description: 'Azure environment'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.faultDomainCount
# label: 'Fault Domain Count'
# default: ''
# type: string
# description: 'Fault domain count to use for availability set'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.image
# label: 'Image'
# default: canonical:UbuntuServer:18.04-LTS:latest
# type: string
# description: 'Azure virtual machine OS image'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.location
# label: 'Location'
# default: westus
# type: string
# description: 'Azure region to create the virtual machine'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.managedDisks
# label: 'Managed Disks'
# default: false
# type: boolean
# description: 'Configures VM and availability set for managed disks'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.noPublicIp
# label: 'No Public IP'
# default: false
# type: boolean
# description: 'Do not create a public IP address for the machine'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.privateIpAddress
# label: 'Private IP Address'
# default: ''
# type: string
# description: 'Specify a static private IP address for the machine'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.resourceGroup
# label: 'Resource Group'
# default: docker-machine
# type: string
# description: 'Azure Resource Group name (will be created if missing)'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.size
# label: 'Size'
# default: 'Standard_D2_v2'
# type: string
# description: 'Size for Azure Virtual Machine'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.sshUser
# label: 'SSH Username'
# default: docker-user
# type: string
# description: 'Username for SSH login'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.staticPublicIp
# label: 'Static Public IP'
# default: false
# type: boolean
# description: 'Assign a static public IP address to the machine'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.storageType
# label: 'Storage Account'
# default: 'Standard_LRS'
# type: string
# description: 'Type of Storage Account to host the OS Disk for the machine'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.subnet
# label: 'Subnet'
# default: docker-machine
# type: string
# description: 'Azure Subnet Name to be used within the Virtual Network'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.subnetPrefix
# label: 'Subnet Prefix'
# default: '192.168.0.0/16'
# type: string
# description: 'Private CIDR block to be used for the new subnet, should comply with RFC 1918'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.updateDomainCount
# label: 'Update Domain Count'
# default: ''
# type: string
# description: 'Update domain count to use for availability set'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.usePrivateIp
# label: 'Use Private IP'
# default: false
# type: boolean
# description: 'Use the private IP address of the machine to connect'
# show_if: cloudprovider=azure
# group: 'Nodepools'
# - variable: nodepools.0.vnet
# label: 'Vnet'
# default: 'docker-machine-vnet'
# type: string
# description: 'Azure Virtual Network name to connect the virtual machine (in [resourcegroup:]name format)'
# show_if: cloudprovider=azure
# group: 'Nodepools'
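For reference, a minimal `values.yaml` sketch corresponding to these questions, with keys matching this chart's cluster template (the AWS values are illustrative):
```yaml
cluster:
  name: mycluster
  config:
    kubernetesVersion: v1.31.5+rke2r1
    enableNetworkPolicy: true
    localClusterAuthEndpoint:
      enabled: false
cloudprovider: amazonec2
cloudCredentialSecretName: aws-creds
nodepools:
  - name: control-plane   # one pool serving etcd and control plane roles
    quantity: 3
    etcd: true
    controlplane: true
    worker: false
    paused: false
    region: us-east-1
    zone: a
    instanceType: t3a.medium
```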

View File

@@ -0,0 +1,6 @@
Congratulations! You've successfully deployed a cluster using the Helm Chart for Rancher Cluster Templates by Rancher Government. Please be patient while the cluster provisions and deploys on your infrastructure.
View the Cluster -> https://{{ .Values.rancher.cattle.url | default "<rancher-url>" }}/dashboard/c/_/manager/provisioning.cattle.io.cluster/fleet-default/{{ .Values.cluster.name }}
View the Docs -> https://github.com/rancherfederal/rancher-cluster-templates

View File

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "rancher-cluster-templates.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "rancher-cluster-templates.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "rancher-cluster-templates.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "rancher-cluster-templates.labels" -}}
helm.sh/chart: {{ include "rancher-cluster-templates.chart" . }}
{{ include "rancher-cluster-templates.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "rancher-cluster-templates.selectorLabels" -}}
app.kubernetes.io/name: {{ include "rancher-cluster-templates.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "rancher-cluster-templates.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "rancher-cluster-templates.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,438 @@
{{- $clustername := .Values.cluster.name -}}
apiVersion: provisioning.cattle.io/v1
kind: Cluster
metadata:
{{- if .Values.cluster.labels }}
labels:
{{ toYaml .Values.cluster.labels | indent 4 }}
{{- end }}
{{- if .Values.cluster.annotations }}
annotations:
{{ toYaml .Values.cluster.annotations | indent 4 }}
{{- end }}
name: {{ .Values.cluster.name }}
namespace: fleet-default
spec:
{{- if .Values.cluster.config.agentEnvVars }}
agentEnvVars:
{{ toYaml .Values.cluster.config.agentEnvVars | indent 4 }}
{{- end }}
{{- if .Values.cloudCredentialSecretName }}
cloudCredentialSecretName: cattle-global-data:{{ .Values.cloudCredentialSecretName }}
{{- end }}
# clusterAPIConfig:
# clusterAgentDeploymentCustomization:
{{- if .Values.cluster.config.defaultClusterRoleForProjectMembers }}
defaultClusterRoleForProjectMembers: {{ .Values.cluster.config.defaultClusterRoleForProjectMembers }}
{{- end }}
{{- if .Values.cluster.config.defaultPodSecurityAdmissionConfigurationTemplateName }}
defaultPodSecurityAdmissionConfigurationTemplateName: {{ .Values.cluster.config.defaultPodSecurityAdmissionConfigurationTemplateName }}
{{- end }}
{{- if .Values.cluster.config.defaultPodSecurityPolicyTemplateName }}
defaultPodSecurityPolicyTemplateName: {{ .Values.cluster.config.defaultPodSecurityPolicyTemplateName }}
{{- end }}
enableNetworkPolicy: {{ .Values.cluster.config.enableNetworkPolicy }}
# fleetAgentDeploymentCustomization:
{{- if .Values.cluster.config.kubernetesVersion }}
kubernetesVersion: {{ .Values.cluster.config.kubernetesVersion }}
{{- end }}
{{- if eq .Values.cluster.config.localClusterAuthEndpoint.enabled true }}
localClusterAuthEndpoint:
enabled: {{ .Values.cluster.config.localClusterAuthEndpoint.enabled }}
fqdn: {{ .Values.cluster.config.localClusterAuthEndpoint.fqdn }}
caCerts: {{ .Values.cluster.config.localClusterAuthEndpoint.caCerts }}
{{- else }}
localClusterAuthEndpoint:
enabled: false
{{- end }}
# redeploySystemAgentGeneration:
rkeConfig:
{{- with $.Values.cluster.config.chartValues }}
chartValues:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with $.Values.cluster.config.additionalManifests }}
additionalManifest:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- if .Values.cluster.config.etcd }}
etcd:
disableSnapshots: {{ .Values.cluster.config.etcd.disableSnapshots }}
snapshotRetention: {{ .Values.cluster.config.etcd.snapshotRetention }}
snapshotScheduleCron: {{ .Values.cluster.config.etcd.snapshotScheduleCron }}
{{- if .Values.cluster.config.etcd.s3 }}
s3:
bucket: {{ .Values.cluster.config.etcd.s3.bucket }}
cloudCredentialName: cattle-global-data:{{ .Values.cluster.config.etcd.s3.cloudCredentialSecretName }}
{{- if .Values.cluster.config.etcd.s3.folder }}
folder: {{ .Values.cluster.config.etcd.s3.folder }}
{{- end }}
region: {{ .Values.cluster.config.etcd.s3.region }}
skipSSLVerify: {{ .Values.cluster.config.etcd.s3.skipSSLVerify }}
endpoint: {{ .Values.cluster.config.etcd.s3.endpoint }}
{{- if .Values.cluster.config.etcd.s3.endpointCA }}
endpointCA: |-
{{ .Values.cluster.config.etcd.s3.endpointCA | indent 10 }}
{{- end }}
{{- end }}
{{- end }}
# etcdSnapshotCreate:
# etcdSnapshotRestore:
# infrastructureRef:
{{- if .Values.cluster.config.globalConfig }}
machineGlobalConfig:
{{- if .Values.cluster.config.globalConfig.cni }}
cni: {{ .Values.cluster.config.globalConfig.cni }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.cluster_cidr }}
cluster-cidr: {{ .Values.cluster.config.globalConfig.cluster_cidr }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.service_cidr }}
service-cidr: {{ .Values.cluster.config.globalConfig.service_cidr }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.docker }}
docker: {{ .Values.cluster.config.globalConfig.docker }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.disable }}
disable: {{ .Values.cluster.config.globalConfig.disable | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.disable_scheduler }}
disable-scheduler: {{ .Values.cluster.config.globalConfig.disable_scheduler }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.disable_cloud_controller }}
disable-cloud-controller: {{ .Values.cluster.config.globalConfig.disable_cloud_controller }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.disable_kube_proxy }}
disable-kube-proxy: {{ .Values.cluster.config.globalConfig.disable_kube_proxy }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.etcd_expose_metrics }}
etcd-expose-metrics: {{ .Values.cluster.config.globalConfig.etcd_expose_metrics }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.profile }}
profile: {{ .Values.cluster.config.globalConfig.profile }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.selinux }}
selinux: {{ .Values.cluster.config.globalConfig.selinux }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.tls_san }}
tls-san: {{ .Values.cluster.config.globalConfig.tls_san | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.token }}
token: {{ .Values.cluster.config.globalConfig.token }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.systemDefaultRegistry }}
system-default-registry: {{ .Values.cluster.config.globalConfig.systemDefaultRegistry }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.secrets_encryption }}
secrets-encryption: {{ .Values.cluster.config.globalConfig.secrets_encryption }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.write_kubeconfig_mode }}
write-kubeconfig-mode: {{ .Values.cluster.config.globalConfig.write_kubeconfig_mode }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.use_service_account_credentials }}
use-service-account-credentials: {{ .Values.cluster.config.globalConfig.use_service_account_credentials }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.protect_kernel_defaults }}
protect-kernel-defaults: {{ .Values.cluster.config.globalConfig.protect_kernel_defaults }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.cloud_provider_name }}
cloud-provider-name: {{ .Values.cluster.config.globalConfig.cloud_provider_name }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.cloud_provider_config }}
cloud-provider-config: {{ .Values.cluster.config.globalConfig.cloud_provider_config }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.kube_controller_manager_arg }}
kube-controller-manager-arg: {{ .Values.cluster.config.globalConfig.kube_controller_manager_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.kube_scheduler_arg }}
kube-scheduler-arg: {{ .Values.cluster.config.globalConfig.kube_scheduler_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.kube_apiserver_arg }}
kube-apiserver-arg: {{ .Values.cluster.config.globalConfig.kube_apiserver_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.kubelet_proxy_arg }}
kubelet-proxy-arg: {{ .Values.cluster.config.globalConfig.kubelet_proxy_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.globalConfig.kubelet_arg }}
kubelet-arg: {{ .Values.cluster.config.globalConfig.kubelet_arg | toRawJson }}
{{- end }}
{{- end }}
# machinePoolDefaults:
{{- if ne .Values.cloudprovider "custom" }}
machinePools:
{{- if .Values.nodepools }} {{ range $index, $nodepool := .Values.nodepools }}
- name: {{ $nodepool.name }}
quantity: {{ $nodepool.quantity }}
controlPlaneRole: {{ $nodepool.controlplane }}
etcdRole: {{ $nodepool.etcd }}
workerRole: {{ $nodepool.worker }}
{{- if $nodepool.labels }}
labels:
{{ toYaml $nodepool.labels | indent 8 }}
{{- end }}
{{- if $nodepool.taints }}
taints:
{{ toYaml $nodepool.taints | indent 8 }}
{{- end }}
machineConfigRef:
{{- if eq $.Values.cloudprovider "amazonec2" }}
kind: Amazonec2Config
{{- else if eq $.Values.cloudprovider "vsphere" }}
kind: VmwarevsphereConfig
{{- else if eq $.Values.cloudprovider "harvester" }}
kind: HarvesterConfig
{{- else if eq $.Values.cloudprovider "digitalocean" }}
kind: DigitaloceanConfig
{{- else if eq $.Values.cloudprovider "azure" }}
kind: AzureConfig
{{- else if eq $.Values.cloudprovider "elemental" }}
apiVersion: elemental.cattle.io/v1beta1
kind: MachineInventorySelectorTemplate
{{- end}}
name: {{ $clustername }}-{{ $nodepool.name }}
displayName: {{ $nodepool.displayName | default $nodepool.name }}
{{- if $nodepool.drainBeforeDelete }}
drainBeforeDelete: {{ $nodepool.drainBeforeDelete }}
{{- end }}
{{- if $nodepool.drainBeforeDeleteTimeout }}
drainBeforeDeleteTimeout: {{ $nodepool.drainBeforeDeleteTimeout }}
{{- end }}
{{- if $nodepool.machineDeploymentLabels }}
machineDeploymentLabels:
{{ toYaml $nodepool.machineDeploymentLabels | indent 8 }}
{{- end }}
{{- if $nodepool.machineDeploymentAnnotations }}
machineDeploymentAnnotations:
{{ toYaml $nodepool.machineDeploymentAnnotations | indent 8 }}
{{- end }}
paused: {{ $nodepool.paused }}
{{- if $nodepool.rollingUpdate }}
rollingUpdate:
maxUnavailable: {{ $nodepool.rollingUpdate.maxUnavailable }}
maxSurge: {{ $nodepool.rollingUpdate.maxSurge }}
{{- end }}
{{- if $nodepool.unhealthyNodeTimeout }}
unhealthyNodeTimeout: {{ $nodepool.unhealthyNodeTimeout }}
{{- end }}
{{- end }}
{{- end }}
{{- if or .Values.cluster.config.controlPlaneConfig .Values.cluster.config.workerConfig }}
machineSelectorConfig:
{{- if .Values.cluster.config.controlPlaneConfig }}
- config:
{{- if .Values.cluster.config.controlPlaneConfig.cni }}
cni: {{ .Values.cluster.config.controlPlaneConfig.cni }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.docker }}
docker: {{ .Values.cluster.config.controlPlaneConfig.docker }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.disable }}
disable: {{ .Values.cluster.config.controlPlaneConfig.disable | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.disable_scheduler }}
disable-scheduler: {{ .Values.cluster.config.controlPlaneConfig.disable_scheduler }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.disable_cloud_controller }}
disable-cloud-controller: {{ .Values.cluster.config.controlPlaneConfig.disable_cloud_controller }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.disable_kube_proxy }}
disable-kube-proxy: {{ .Values.cluster.config.controlPlaneConfig.disable_kube_proxy }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.etcd_expose_metrics }}
etcd-expose-metrics: {{ .Values.cluster.config.controlPlaneConfig.etcd_expose_metrics }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.profile }}
profile: {{ .Values.cluster.config.controlPlaneConfig.profile }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.selinux }}
selinux: {{ .Values.cluster.config.controlPlaneConfig.selinux }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.tls_san }}
tls-san: {{ .Values.cluster.config.controlPlaneConfig.tls_san | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.token }}
token: {{ .Values.cluster.config.controlPlaneConfig.token }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.systemDefaultRegistry }}
system-default-registry: {{ .Values.cluster.config.controlPlaneConfig.systemDefaultRegistry }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.secrets_encryption }}
secrets-encryption: {{ .Values.cluster.config.controlPlaneConfig.secrets_encryption }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.write_kubeconfig_mode }}
write-kubeconfig-mode: {{ .Values.cluster.config.controlPlaneConfig.write_kubeconfig_mode }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.use_service_account_credentials }}
use-service-account-credentials: {{ .Values.cluster.config.controlPlaneConfig.use_service_account_credentials }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.protect_kernel_defaults }}
protect-kernel-defaults: {{ .Values.cluster.config.controlPlaneConfig.protect_kernel_defaults }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.cloud_provider_name }}
cloud-provider-name: {{ .Values.cluster.config.controlPlaneConfig.cloud_provider_name }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.cloud_provider_config }}
cloud-provider-config: {{ .Values.cluster.config.controlPlaneConfig.cloud_provider_config }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.kube_controller_manager_arg }}
kube-controller-manager-arg: {{ .Values.cluster.config.controlPlaneConfig.kube_controller_manager_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.kube_scheduler_arg }}
kube-scheduler-arg: {{ .Values.cluster.config.controlPlaneConfig.kube_scheduler_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.kube_apiserver_arg }}
kube-apiserver-arg: {{ .Values.cluster.config.controlPlaneConfig.kube_apiserver_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.kubelet_proxy_arg }}
kubelet-proxy-arg: {{ .Values.cluster.config.controlPlaneConfig.kubelet_proxy_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.controlPlaneConfig.kubelet_arg }}
kubelet-arg: {{ .Values.cluster.config.controlPlaneConfig.kubelet_arg | toRawJson }}
{{- end }}
machineLabelSelector:
matchLabels:
node-role.kubernetes.io/control-plane: "true"
{{- end }}
{{- if .Values.cluster.config.workerConfig }}
- config:
{{- if .Values.cluster.config.workerConfig.cni }}
cni: {{ .Values.cluster.config.workerConfig.cni }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.docker }}
docker: {{ .Values.cluster.config.workerConfig.docker }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.disable }}
disable: {{ .Values.cluster.config.workerConfig.disable | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.disable_scheduler }}
disable-scheduler: {{ .Values.cluster.config.workerConfig.disable_scheduler }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.disable_cloud_controller }}
disable-cloud-controller: {{ .Values.cluster.config.workerConfig.disable_cloud_controller }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.disable_kube_proxy }}
disable-kube-proxy: {{ .Values.cluster.config.workerConfig.disable_kube_proxy }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.etcd_expose_metrics }}
etcd-expose-metrics: {{ .Values.cluster.config.workerConfig.etcd_expose_metrics }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.profile }}
profile: {{ .Values.cluster.config.workerConfig.profile }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.selinux }}
selinux: {{ .Values.cluster.config.workerConfig.selinux }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.tls_san }}
tls-san: {{ .Values.cluster.config.workerConfig.tls_san | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.token }}
token: {{ .Values.cluster.config.workerConfig.token }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.systemDefaultRegistry }}
system-default-registry: {{ .Values.cluster.config.workerConfig.systemDefaultRegistry }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.secrets_encryption }}
secrets-encryption: {{ .Values.cluster.config.workerConfig.secrets_encryption }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.write_kubeconfig_mode }}
write-kubeconfig-mode: {{ .Values.cluster.config.workerConfig.write_kubeconfig_mode }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.use_service_account_credentials }}
use-service-account-credentials: {{ .Values.cluster.config.workerConfig.use_service_account_credentials }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.protect_kernel_defaults }}
protect-kernel-defaults: {{ .Values.cluster.config.workerConfig.protect_kernel_defaults }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.cloud_provider_name }}
cloud-provider-name: {{ .Values.cluster.config.workerConfig.cloud_provider_name }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.cloud_provider_config }}
cloud-provider-config: {{ .Values.cluster.config.workerConfig.cloud_provider_config }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.kube_controller_manager_arg }}
kube-controller-manager-arg: {{ .Values.cluster.config.workerConfig.kube_controller_manager_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.kube_scheduler_arg }}
kube-scheduler-arg: {{ .Values.cluster.config.workerConfig.kube_scheduler_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.kube_apiserver_arg }}
kube-apiserver-arg: {{ .Values.cluster.config.workerConfig.kube_apiserver_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.kubelet_proxy_arg }}
kubelet-proxy-arg: {{ .Values.cluster.config.workerConfig.kubelet_proxy_arg | toRawJson }}
{{- end }}
{{- if .Values.cluster.config.workerConfig.kubelet_arg }}
kubelet-arg: {{ .Values.cluster.config.workerConfig.kubelet_arg | toRawJson }}
{{- end }}
machineLabelSelector:
matchLabels:
rke.cattle.io/worker-role: "true"
{{- end }}
{{- end }}
{{- end }}
# machineSelectorFiles:
# provisionGeneration:
{{- if and .Values.cluster.config.registries (eq .Values.cluster.config.registries.enabled true) }}
registries:
configs:
{{- range .Values.cluster.config.registries.configs }}
{{ .name }}:
authConfigSecretName: {{ .authConfigSecretName }}
caBundle: {{ .caBundle }}
insecureSkipVerify: {{ .insecureSkipVerify }}
tlsSecretName: {{ .tlsSecretName }}
{{- end }}
{{- if .Values.cluster.config.registries.mirrors }}
mirrors:
{{- range .Values.cluster.config.registries.mirrors }}
{{ .name | quote }}:
endpoint:
{{- range .endpoints }}
- {{ . }}
{{- end }}
{{- if .rewrite }}
rewrite:
{{- range $key, $value := .rewrite }}
"{{ $key }}": "{{ $value }}"
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
# rotateCertificates:
# rotateEncryptionKeys:
{{- if .Values.cluster.config.upgradeStrategy }}
upgradeStrategy:
controlPlaneConcurrency: {{ .Values.cluster.config.upgradeStrategy.controlPlaneConcurrency }}
{{- if eq .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.enabled true }}
controlPlaneDrainOptions:
enabled: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.enabled }}
deleteEmptyDirData: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.deleteEmptyDirData }}
disableEviction: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.disableEviction }}
force: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.force }}
gracePeriod: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.gracePeriod }}
ignoreDaemonSets: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.ignoreDaemonSets }}
ignoreErrors: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.ignoreErrors }}
skipWaitForDeleteTimeoutSeconds: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.skipWaitForDeleteTimeoutSeconds }}
timeout: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.timeout }}
{{- else }}
controlPlaneDrainOptions:
enabled: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.enabled }}
{{- end }}
workerConcurrency: {{ .Values.cluster.config.upgradeStrategy.workerConcurrency }}
{{- if eq .Values.cluster.config.upgradeStrategy.workerDrainOptions.enabled true }}
workerDrainOptions:
enabled: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.enabled }}
deleteEmptyDirData: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.deleteEmptyDirData }}
disableEviction: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.disableEviction }}
force: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.force }}
gracePeriod: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.gracePeriod }}
ignoreDaemonSets: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.ignoreDaemonSets }}
ignoreErrors: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.ignoreErrors }}
skipWaitForDeleteTimeoutSeconds: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.skipWaitForDeleteTimeoutSeconds }}
timeout: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.timeout }}
{{- else }}
workerDrainOptions:
enabled: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.enabled }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,11 @@
{{ $root := . }}
{{- range $index, $member := .Values.clusterMembers }}
---
apiVersion: management.cattle.io/v3
clusterName: c-m-{{ trunc 8 (sha256sum (printf "%s/%s" $root.Release.Namespace $root.Values.cluster.name)) }}
kind: ClusterRoleTemplateBinding
metadata:
name: ctrb-{{ trunc 8 (sha256sum (printf "%s/%s" $root.Release.Namespace $member.principalName )) }}
namespace: c-m-{{ trunc 8 (sha256sum (printf "%s/%s" $root.Release.Namespace $root.Values.cluster.name)) }}
roleTemplateName: {{ $member.roleTemplateName }}
userPrincipalName: {{ $member.principalName }}
{{- end }}
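A `values.yaml` sketch for this template (field names taken from the loop above; the principal name is illustrative):
```yaml
clusterMembers:
  - principalName: local://u-abc123   # illustrative user principal
    roleTemplateName: cluster-member  # built-in Rancher role template
```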

View File

@@ -0,0 +1,33 @@
{{- $clustername := .Values.cluster.name -}}
{{- range .Values.nodepools }}
{{- if eq .controlplane true }}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
metadata:
name: {{ $clustername }}-controlplane-healthcheck
namespace: fleet-default
spec:
clusterName: {{ $clustername }}
selector:
matchLabels:
cluster.x-k8s.io/control-plane: 'true'
cluster.x-k8s.io/cluster-name: {{ $clustername }}
# SAFETY FUSE:
# "40%" prevents a 1-node CP from trying to self-heal (which would kill it).
# If you have 3 nodes, this allows 1 to fail.
maxUnhealthy: 40%
# TIMEOUTS (v1beta1 uses duration strings like "10m", not integers)
nodeStartupTimeout: 10m
unhealthyConditions:
- type: Ready
status: Unknown
timeout: 300s
- type: Ready
status: "False"
timeout: 300s
{{- end }}
{{- end }}

View File

@@ -0,0 +1,25 @@
{{- $clustername := .Values.cluster.name -}}
{{- range .Values.nodepools }}
{{- if eq .worker true }}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
metadata:
name: {{ $clustername }}-worker-healthcheck
namespace: fleet-default
spec:
clusterName: {{ $clustername }}
selector:
matchLabels:
rke.cattle.io/worker-role: "true"
cluster.x-k8s.io/cluster-name: {{ $clustername }}
maxUnhealthy: 100%
nodeStartupTimeout: 10m
unhealthyConditions:
- type: Ready
status: "False"
timeout: 300s
{{- end }}
{{- end }}

View File

@@ -0,0 +1,201 @@
{{- if .Values.addons.monitoring }}
{{- if .Values.addons.monitoring.enabled }}
apiVersion: management.cattle.io/v3
kind: ManagedChart
metadata:
name: monitoring-crd-{{ .Values.cluster.name }}
namespace: fleet-default
spec:
chart: "rancher-monitoring-crd"
repoName: "rancher-charts"
releaseName: "rancher-monitoring-crd"
version: {{ .Values.addons.monitoring.version }}
{{- if .Values.addons.monitoring.values }}
values:
{{ toYaml .Values.addons.monitoring.values | indent 4 }}
{{- end }}
defaultNamespace: "cattle-monitoring-system"
targets:
- clusterName: {{ .Values.cluster.name }}
---
apiVersion: management.cattle.io/v3
kind: ManagedChart
metadata:
name: monitoring-{{ .Values.cluster.name }}
namespace: fleet-default
spec:
chart: "rancher-monitoring"
repoName: "rancher-charts"
releaseName: "rancher-monitoring"
version: {{ .Values.addons.monitoring.version }}
{{- if .Values.addons.monitoring.values }}
values:
{{ toYaml .Values.addons.monitoring.values | indent 4 }}
{{- end }}
defaultNamespace: "cattle-monitoring-system"
targets:
- clusterName: {{ .Values.cluster.name }}
diff:
comparePatches:
- apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
name: rancher-monitoring-admission
jsonPointers:
- /webhooks/0/failurePolicy
- apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
name: rancher-monitoring-admission
jsonPointers:
- /webhooks/0/failurePolicy
- apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
name: rancher-monitoring-kubelet
namespace: kube-system
jsonPointers:
- /spec/endpoints
---
{{- end }}
{{- end }}
{{- if .Values.addons.logging }}
{{- if .Values.addons.logging.enabled }}
apiVersion: management.cattle.io/v3
kind: ManagedChart
metadata:
name: logging-crd-{{ .Values.cluster.name }}
namespace: fleet-default
spec:
chart: "rancher-logging-crd"
repoName: "rancher-charts"
releaseName: "rancher-logging-crd"
version: {{ .Values.addons.logging.version }}
{{- if .Values.addons.logging.values }}
values:
{{ toYaml .Values.addons.logging.values | indent 4 }}
{{- end }}
defaultNamespace: "cattle-logging-system"
targets:
- clusterName: {{ .Values.cluster.name }}
---
apiVersion: management.cattle.io/v3
kind: ManagedChart
metadata:
name: logging-{{ .Values.cluster.name }}
namespace: fleet-default
spec:
chart: "rancher-logging"
repoName: "rancher-charts"
releaseName: "rancher-logging"
version: {{ .Values.addons.logging.version }}
{{- if .Values.addons.logging.values }}
values:
{{ toYaml .Values.addons.logging.values | indent 4 }}
{{- end }}
defaultNamespace: "cattle-logging-system"
targets:
- clusterName: {{ .Values.cluster.name }}
---
{{- end }}
{{- end }}
{{- if .Values.addons.longhorn }}
{{- if .Values.addons.longhorn.enabled }}
apiVersion: management.cattle.io/v3
kind: ManagedChart
metadata:
name: longhorn-crd-{{ .Values.cluster.name }}
namespace: fleet-default
spec:
chart: "longhorn-crd"
repoName: "rancher-charts"
releaseName: "longhorn-crd"
version: {{ .Values.addons.longhorn.version }}
{{- if .Values.addons.longhorn.values }}
values:
{{ toYaml .Values.addons.longhorn.values | indent 4 }}
{{- end }}
defaultNamespace: "longhorn-system"
targets:
- clusterName: {{ .Values.cluster.name }}
diff:
comparePatches:
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: engineimages.longhorn.io
jsonPointers:
- /status/acceptedNames
- /status/conditions
- /status/storedVersions
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: nodes.longhorn.io
jsonPointers:
- /status/acceptedNames
- /status/conditions
- /status/storedVersions
- apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: volumes.longhorn.io
jsonPointers:
- /status/acceptedNames
- /status/conditions
- /status/storedVersions
---
apiVersion: management.cattle.io/v3
kind: ManagedChart
metadata:
name: longhorn-{{ .Values.cluster.name }}
namespace: fleet-default
spec:
chart: "longhorn"
repoName: "rancher-charts"
releaseName: "longhorn"
version: {{ .Values.addons.longhorn.version }}
{{- if .Values.addons.longhorn.values }}
values:
{{ toYaml .Values.addons.longhorn.values | indent 4 }}
{{- end }}
defaultNamespace: "longhorn-system"
targets:
- clusterName: {{ .Values.cluster.name }}
---
{{- end }}
{{- end }}
{{- if .Values.addons.neuvector }}
{{- if .Values.addons.neuvector.enabled }}
apiVersion: management.cattle.io/v3
kind: ManagedChart
metadata:
name: neuvector-crd-{{ .Values.cluster.name }}
namespace: fleet-default
spec:
chart: "neuvector-crd"
repoName: "rancher-charts"
releaseName: "neuvector-crd"
version: {{ .Values.addons.neuvector.version }}
{{- if .Values.addons.neuvector.values }}
values:
{{ toYaml .Values.addons.neuvector.values | indent 4 }}
{{- end }}
defaultNamespace: "cattle-neuvector-system"
targets:
- clusterName: {{ .Values.cluster.name }}
---
apiVersion: management.cattle.io/v3
kind: ManagedChart
metadata:
name: neuvector-{{ .Values.cluster.name }}
namespace: fleet-default
spec:
chart: "neuvector"
repoName: "rancher-charts"
releaseName: "neuvector"
version: {{ .Values.addons.neuvector.version }}
{{- if .Values.addons.neuvector.values }}
values:
{{ toYaml .Values.addons.neuvector.values | indent 4 }}
{{- end }}
defaultNamespace: "cattle-neuvector-system"
targets:
- clusterName: {{ .Values.cluster.name }}
---
{{- end }}
{{- end }}
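A `values.yaml` sketch enabling one of these add-ons (keys mirror the conditionals above; per the chart questions, an empty version installs the latest):
```yaml
addons:
  monitoring:
    enabled: true
    version: ""   # empty installs the latest chart version
    values: {}    # optional custom chart values
```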

View File

@@ -0,0 +1,251 @@
{{- $clustername := .Values.cluster.name -}}
{{- if eq .Values.cloudprovider "amazonec2" }}
{{- range $index, $nodepool := .Values.nodepools }}
apiVersion: rke-machine-config.cattle.io/v1
kind: Amazonec2Config
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
{{- if $nodepool.accessKey }}
accessKey: {{ $nodepool.accessKey }}
{{- end }}
{{- if $nodepool.ami }}
ami: {{ $nodepool.ami }}
{{- end }}
{{- if $nodepool.blockDurationMinutes }}
blockDurationMinutes: {{ $nodepool.blockDurationMinutes }}
{{- end }}
{{- if $nodepool.deviceName }}
deviceName: {{ $nodepool.deviceName }}
{{- end }}
{{- if $nodepool.encryptEbsVolume }}
encryptEbsVolume: {{ $nodepool.encryptEbsVolume }}
{{- end }}
{{- if $nodepool.endpoint }}
endpoint: {{ $nodepool.endpoint }}
{{- end }}
{{- if $nodepool.httpEndpoint }}
httpEndpoint: {{ $nodepool.httpEndpoint }}
{{- end }}
{{- if $nodepool.httpTokens }}
httpTokens: {{ $nodepool.httpTokens }}
{{- end }}
{{- if $nodepool.iamInstanceProfile }}
iamInstanceProfile: {{ $nodepool.iamInstanceProfile }}
{{- end }}
{{- if $nodepool.insecureTransport }}
insecureTransport: {{ $nodepool.insecureTransport }}
{{- end }}
{{- if $nodepool.instanceType }}
instanceType: {{ $nodepool.instanceType }}
{{- end }}
{{- if $nodepool.keypairName }}
keypairName: {{ $nodepool.keypairName }}
{{- end }}
{{- if $nodepool.kmsKey }}
kmsKey: {{ $nodepool.kmsKey }}
{{- end }}
{{- if $nodepool.monitoring }}
monitoring: {{ $nodepool.monitoring }}
{{- end }}
{{- if $nodepool.openPort}}
openPort:
{{- range $i, $port := $nodepool.openPort }}
- {{ $port | squote }}
{{- end }}
{{- end }}
{{- if $nodepool.privateAddressOnly }}
privateAddressOnly: {{ $nodepool.privateAddressOnly }}
{{- end }}
{{- if $nodepool.region }}
region: {{ $nodepool.region }}
{{- end }}
{{- if $nodepool.requestSpotInstance }}
requestSpotInstance: {{ $nodepool.requestSpotInstance }}
{{- end }}
{{- if $nodepool.retries }}
retries: {{ $nodepool.retries | squote }}
{{- end }}
{{- if $nodepool.rootSize }}
rootSize: {{ $nodepool.rootSize | squote }}
{{- end }}
{{- if $nodepool.secretKey }}
secretKey: {{ $nodepool.secretKey }}
{{- end }}
{{- if $nodepool.createSecurityGroup }}
securityGroup:
- rancher-nodes
{{- else if $nodepool.securityGroups }}
securityGroup:
{{ toYaml $nodepool.securityGroups }}
{{- end }}
{{- if $nodepool.securityGroupReadonly }}
securityGroupReadonly: {{ $nodepool.securityGroupReadonly }}
{{- end }}
{{- if $nodepool.sessionToken }}
sessionToken: {{ $nodepool.sessionToken }}
{{- end }}
{{- if $nodepool.spotPrice }}
spotPrice: {{ $nodepool.spotPrice }}
{{- end }}
{{- if $nodepool.sshKeyContents }}
sshKeyContents: {{ $nodepool.sshKeyContents }}
{{- end }}
{{- if $nodepool.sshUser }}
sshUser: {{ $nodepool.sshUser }}
{{- end }}
{{- if $nodepool.subnetId }}
subnetId: {{ $nodepool.subnetId }}
{{- end }}
{{- if $nodepool.tags }}
tags: {{ $nodepool.tags }}
{{- end }}
{{- if $nodepool.useEbsOptimizedInstance }}
useEbsOptimizedInstance: {{ $nodepool.useEbsOptimizedInstance }}
{{- end }}
{{- if $nodepool.usePrivateAddress }}
usePrivateAddress: {{ $nodepool.usePrivateAddress }}
{{- end }}
{{- if $nodepool.userData }}
userdata: {{- $nodepool.userData | toYaml | indent 1 }}
{{- end }}
{{- if $nodepool.volumeType }}
volumeType: {{ $nodepool.volumeType }}
{{- end }}
{{- if $nodepool.vpcId }}
vpcId: {{ $nodepool.vpcId }}
{{- end }}
{{- if $nodepool.zone }}
zone: {{ $nodepool.zone }}
{{- end }}
---
{{- end }}
{{ $nodepool := .Values.nodepool }}
{{- if $nodepool }}
apiVersion: rke-machine-config.cattle.io/v1
kind: Amazonec2Config
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
common:
{{- if $nodepool.labels }}
labels:
{{ toYaml $nodepool.labels | indent 4 }}
{{- end }}
{{- if $nodepool.taints }}
taints:
{{ toYaml $nodepool.taints | indent 4 }}
{{- end }}
{{- if $nodepool.accessKey }}
accessKey: {{ $nodepool.accessKey }}
{{- end }}
{{- if $nodepool.ami }}
ami: {{ $nodepool.ami }}
{{- end }}
{{- if $nodepool.blockDurationMinutes }}
blockDurationMinutes: {{ $nodepool.blockDurationMinutes }}
{{- end }}
{{- if $nodepool.deviceName }}
deviceName: {{ $nodepool.deviceName }}
{{- end }}
{{- if $nodepool.encryptEbsVolume }}
encryptEbsVolume: {{ $nodepool.encryptEbsVolume }}
{{- end }}
{{- if $nodepool.endpoint }}
endpoint: {{ $nodepool.endpoint }}
{{- end }}
{{- if $nodepool.httpEndpoint }}
httpEndpoint: {{ $nodepool.httpEndpoint }}
{{- end }}
{{- if $nodepool.httpTokens }}
httpTokens: {{ $nodepool.httpTokens }}
{{- end }}
{{- if $nodepool.iamInstanceProfile }}
iamInstanceProfile: {{ $nodepool.iamInstanceProfile }}
{{- end }}
{{- if $nodepool.insecureTransport }}
insecureTransport: {{ $nodepool.insecureTransport }}
{{- end }}
{{- if $nodepool.instanceType }}
instanceType: {{ $nodepool.instanceType }}
{{- end }}
{{- if $nodepool.keypairName }}
keypairName: {{ $nodepool.keypairName }}
{{- end }}
{{- if $nodepool.kmsKey }}
kmsKey: {{ $nodepool.kmsKey }}
{{- end }}
{{- if $nodepool.monitoring }}
monitoring: {{ $nodepool.monitoring }}
{{- end }}
{{- if $nodepool.openPort}}
openPort:
{{- range $i, $port := $nodepool.openPort }}
- {{ $port | squote }}
{{- end }}
{{- end }}
{{- if $nodepool.privateAddressOnly }}
privateAddressOnly: {{ $nodepool.privateAddressOnly }}
{{- end }}
{{- if $nodepool.region }}
region: {{ $nodepool.region }}
{{- end }}
{{- if $nodepool.requestSpotInstance }}
requestSpotInstance: {{ $nodepool.requestSpotInstance }}
{{- end }}
{{- if $nodepool.retries }}
retries: {{ $nodepool.retries | squote }}
{{- end }}
{{- if $nodepool.rootSize }}
rootSize: {{ $nodepool.rootSize | squote }}
{{- end }}
{{- if $nodepool.secretKey }}
secretKey: {{ $nodepool.secretKey }}
{{- end }}
{{- if $nodepool.createSecurityGroup }}
securityGroup:
- rancher-nodes
{{- else if $nodepool.securityGroups }}
securityGroup:
{{ toYaml $nodepool.securityGroups }}
{{- end }}
{{- if $nodepool.securityGroupReadonly }}
securityGroupReadonly: {{ $nodepool.securityGroupReadonly }}
{{- end }}
{{- if $nodepool.sessionToken }}
sessionToken: {{ $nodepool.sessionToken }}
{{- end }}
{{- if $nodepool.spotPrice }}
spotPrice: {{ $nodepool.spotPrice }}
{{- end }}
{{- if $nodepool.sshKeyContents }}
sshKeyContents: {{ $nodepool.sshKeyContents }}
{{- end }}
{{- if $nodepool.sshUser }}
sshUser: {{ $nodepool.sshUser }}
{{- end }}
{{- if $nodepool.subnetId }}
subnetId: {{ $nodepool.subnetId }}
{{- end }}
{{- if $nodepool.tags }}
tags: {{ $nodepool.tags }}
{{- end }}
{{- if $nodepool.useEbsOptimizedInstance }}
useEbsOptimizedInstance: {{ $nodepool.useEbsOptimizedInstance }}
{{- end }}
{{- if $nodepool.usePrivateAddress }}
usePrivateAddress: {{ $nodepool.usePrivateAddress }}
{{- end }}
{{- if $nodepool.userData }}
userdata: {{- $nodepool.userData | toYaml | indent 1 }}
{{- end }}
{{- if $nodepool.volumeType }}
volumeType: {{ $nodepool.volumeType }}
{{- end }}
{{- if $nodepool.vpcId }}
vpcId: {{ $nodepool.vpcId }}
{{- end }}
{{- if $nodepool.zone }}
zone: {{ $nodepool.zone }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,95 @@
{{- $clustername := .Values.cluster.name -}}
{{- if eq .Values.cloudprovider "azure" }}
{{- range $index, $nodepool := .Values.nodepools }}
apiVersion: rke-machine-config.cattle.io/v1
kind: AzureConfig
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
common:
{{- if $nodepool.labels }}
labels:
{{ toYaml $nodepool.labels | indent 4 }}
{{- end }}
{{- if $nodepool.taints }}
taints:
{{ toYaml $nodepool.taints | indent 4 }}
{{- end }}
availabilitySet: {{ $nodepool.availabilitySet }}
clientId: {{ $nodepool.clientId }}
customData: {{ $nodepool.customData }}
diskSize: {{ $nodepool.diskSize }}
dns: {{ $nodepool.dns }}
environment: {{ $nodepool.environment }}
faultDomainCount: {{ $nodepool.faultDomainCount }}
image: {{ $nodepool.image }}
location: {{ $nodepool.location }}
managedDisks: {{ $nodepool.managedDisks }}
noPublicIp: {{ $nodepool.noPublicIp }}
{{- if $nodepool.openPort }}
openPort:
{{- range $i, $port := $nodepool.openPort }}
- {{ $port }}
{{- end }}
{{- end }}
privateIpAddress: {{ $nodepool.privateIpAddress }}
resourceGroup: {{ $nodepool.resourceGroup }}
size: {{ $nodepool.size }}
sshUser: {{ $nodepool.sshUser }}
staticPublicIp: {{ $nodepool.staticPublicIp }}
storageType: {{ $nodepool.storageType }}
subnet: {{ $nodepool.subnet }}
subnetPrefix: {{ $nodepool.subnetPrefix }}
subscriptionId: {{ $nodepool.subscriptionId }}
updateDomainCount: {{ $nodepool.updateDomainCount }}
usePrivateIp: {{ $nodepool.usePrivateIp }}
vnet: {{ $nodepool.vnet }}
---
{{- end }}
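{{- /* Fallback: render a single AzureConfig when .Values.nodepool (singular) is set. */}}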
{{ $nodepool := .Values.nodepool }}
{{- if $nodepool }}
apiVersion: rke-machine-config.cattle.io/v1
kind: AzureConfig
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
common:
{{- if $nodepool.labels }}
labels:
{{ toYaml $nodepool.labels | indent 4 }}
{{- end }}
{{- if $nodepool.taints }}
taints:
{{ toYaml $nodepool.taints | indent 4 }}
{{- end }}
availabilitySet: {{ $nodepool.availabilitySet }}
clientId: {{ $nodepool.clientId }}
customData: {{ $nodepool.customData }}
diskSize: {{ $nodepool.diskSize }}
dns: {{ $nodepool.dns }}
environment: {{ $nodepool.environment }}
faultDomainCount: {{ $nodepool.faultDomainCount }}
image: {{ $nodepool.image }}
location: {{ $nodepool.location }}
managedDisks: {{ $nodepool.managedDisks }}
noPublicIp: {{ $nodepool.noPublicIp }}
{{- if $nodepool.openPort }}
openPort:
{{- range $i, $port := $nodepool.openPort }}
- {{ $port }}
{{- end }}
{{- end }}
privateIpAddress: {{ $nodepool.privateIpAddress }}
resourceGroup: {{ $nodepool.resourceGroup }}
size: {{ $nodepool.size }}
sshUser: {{ $nodepool.sshUser }}
staticPublicIp: {{ $nodepool.staticPublicIp }}
storageType: {{ $nodepool.storageType }}
subnet: {{ $nodepool.subnet }}
subnetPrefix: {{ $nodepool.subnetPrefix }}
subscriptionId: {{ $nodepool.subscriptionId }}
updateDomainCount: {{ $nodepool.updateDomainCount }}
usePrivateIp: {{ $nodepool.usePrivateIp }}
vnet: {{ $nodepool.vnet }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,103 @@
{{- $clustername := .Values.cluster.name -}}
{{- if eq .Values.cloudprovider "digitalocean" }}
{{- range $index, $nodepool := .Values.nodepools }}
apiVersion: rke-machine-config.cattle.io/v1
kind: DigitaloceanConfig
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
{{- if $nodepool.accessToken }}
accessToken: {{ $nodepool.accessToken }}
{{- end }}
{{- if $nodepool.backups }}
backups: {{ $nodepool.backups }}
{{- end }}
{{- if $nodepool.image }}
image: {{ $nodepool.image }}
{{- end }}
{{- if $nodepool.ipv6 }}
ipv6: {{ $nodepool.ipv6 }}
{{- end }}
{{- if $nodepool.monitoring }}
monitoring: {{ $nodepool.monitoring }}
{{- end }}
{{- if $nodepool.privateNetworking }}
privateNetworking: {{ $nodepool.privateNetworking }}
{{- end }}
{{- if $nodepool.region }}
region: {{ $nodepool.region }}
{{- end }}
{{- if $nodepool.size }}
size: {{ $nodepool.size }}
{{- end }}
{{- if $nodepool.sshKeyContents }}
sshKeyContents: {{ $nodepool.sshKeyContents }}
{{- end }}
{{- if $nodepool.sshKeyFingerprint }}
sshKeyFingerprint: {{ $nodepool.sshKeyFingerprint }}
{{- end }}
{{- if $nodepool.sshPort }}
sshPort: {{ $nodepool.sshPort | squote }}
{{- end }}
{{- if $nodepool.sshUser }}
sshUser: {{ $nodepool.sshUser }}
{{- end }}
{{- if $nodepool.tags }}
tags: {{ $nodepool.tags }}
{{- end }}
{{- if $nodepool.userData }}
userdata: {{- $nodepool.userData | toYaml | indent 1 }}
{{- end }}
---
{{- end }}
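{{- /* Fallback: render a single DigitaloceanConfig when .Values.nodepool (singular) is set. */}}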
{{ $nodepool := .Values.nodepool }}
{{- if $nodepool }}
apiVersion: rke-machine-config.cattle.io/v1
kind: DigitaloceanConfig
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
{{- if $nodepool.accessToken }}
accessToken: {{ $nodepool.accessToken }}
{{- end }}
{{- if $nodepool.backups }}
backups: {{ $nodepool.backups }}
{{- end }}
{{- if $nodepool.image }}
image: {{ $nodepool.image }}
{{- end }}
{{- if $nodepool.ipv6 }}
ipv6: {{ $nodepool.ipv6 }}
{{- end }}
{{- if $nodepool.monitoring }}
monitoring: {{ $nodepool.monitoring }}
{{- end }}
{{- if $nodepool.privateNetworking }}
privateNetworking: {{ $nodepool.privateNetworking }}
{{- end }}
{{- if $nodepool.region }}
region: {{ $nodepool.region }}
{{- end }}
{{- if $nodepool.size }}
size: {{ $nodepool.size }}
{{- end }}
{{- if $nodepool.sshKeyContents }}
sshKeyContents: {{ $nodepool.sshKeyContents }}
{{- end }}
{{- if $nodepool.sshKeyFingerprint }}
sshKeyFingerprint: {{ $nodepool.sshKeyFingerprint }}
{{- end }}
{{- if $nodepool.sshPort }}
sshPort: {{ $nodepool.sshPort | squote }}
{{- end }}
{{- if $nodepool.sshUser }}
sshUser: {{ $nodepool.sshUser }}
{{- end }}
{{- if $nodepool.tags }}
tags: {{ $nodepool.tags }}
{{- end }}
{{- if $nodepool.userData }}
userdata: {{- $nodepool.userData | toYaml | indent 1 }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,15 @@
{{- $clustername := .Values.cluster.name -}}
{{- if eq .Values.cloudprovider "elemental" }}
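{{- /* Elemental provisions onto pre-registered hardware: instead of a machine config, the selector template below matches existing MachineInventory objects by label. */}}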
{{- range $index, $nodepool := .Values.nodepools }}
apiVersion: elemental.cattle.io/v1beta1
kind: MachineInventorySelectorTemplate
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
spec:
template:
spec:
selector:
{{- toYaml $nodepool.selector | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,166 @@
{{- $clustername := .Values.cluster.name -}}
{{- if eq .Values.cloudprovider "harvester" }}
{{- range $index, $nodepool := .Values.nodepools }}
apiVersion: rke-machine-config.cattle.io/v1
kind: HarvesterConfig
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
{{- if $nodepool.cloudConfig }}
cloudConfig: {{ $nodepool.cloudConfig }}
{{- end }}
{{- if $nodepool.clusterId }}
clusterId: {{ $nodepool.clusterId }}
{{- end }}
{{- if $nodepool.clusterType }}
clusterType: {{ $nodepool.clusterType }}
{{- end }}
{{- if $nodepool.cpuCount }}
cpuCount: {{ $nodepool.cpuCount | squote }}
{{- end }}
{{- if $nodepool.diskBus }}
diskBus: {{ $nodepool.diskBus }}
{{- end }}
{{- if $nodepool.diskInfo }}
diskInfo: {{ $nodepool.diskInfo }}
{{- end }}
{{- if $nodepool.diskSize }}
diskSize: {{ $nodepool.diskSize | squote }}
{{- end }}
{{- if $nodepool.imageName }}
imageName: {{ $nodepool.imageName }}
{{- end }}
{{- if $nodepool.keyPairName }}
keyPairName: {{ $nodepool.keyPairName }}
{{- end }}
{{- if $nodepool.kubeconfigContent }}
kubeconfigContent: {{- $nodepool.kubeconfigContent | toYaml | indent 1 }}
{{- end }}
{{- if $nodepool.memorySize }}
memorySize: {{ $nodepool.memorySize | squote }}
{{- end }}
{{- if $nodepool.networkData }}
networkData: {{- $nodepool.networkData | toYaml | indent 1 }}
{{- end }}
{{- if $nodepool.networkInfo }}
networkInfo: {{ $nodepool.networkInfo }}
{{- end }}
{{- if $nodepool.networkModel }}
networkModel: {{ $nodepool.networkModel }}
{{- end }}
{{- if $nodepool.networkName }}
networkName: {{ $nodepool.networkName }}
{{- end }}
{{- if $nodepool.networkType }}
networkType: {{ $nodepool.networkType }}
{{- end }}
{{- if $nodepool.sshPassword }}
sshPassword: {{ $nodepool.sshPassword }}
{{- end }}
{{- if $nodepool.sshPort }}
sshPort: {{ $nodepool.sshPort | squote }}
{{- end }}
{{- if $nodepool.sshPrivateKeyPath }}
sshPrivateKeyPath: {{ $nodepool.sshPrivateKeyPath }}
{{- end }}
{{- if $nodepool.sshUser }}
sshUser: {{ $nodepool.sshUser }}
{{- end }}
{{- if $nodepool.userData }}
userData: {{- $nodepool.userData | toYaml | indent 1 }}
{{- end }}
{{- if $nodepool.vmAffinity }}
vmAffinity: {{ $nodepool.vmAffinity }}
{{- end }}
{{- if $nodepool.vmNamespace }}
vmNamespace: {{ $nodepool.vmNamespace }}
{{- end }}
---
{{- end }}
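{{- /* Fallback: render a single HarvesterConfig when .Values.nodepool (singular) is set. */}}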
{{ $nodepool := .Values.nodepool }}
{{- if $nodepool }}
apiVersion: rke-machine-config.cattle.io/v1
kind: HarvesterConfig
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
common:
{{- if $nodepool.labels }}
labels:
{{ toYaml $nodepool.labels | indent 4 }}
{{- end }}
{{- if $nodepool.taints }}
taints:
{{ toYaml $nodepool.taints | indent 4 }}
{{- end }}
{{- if $nodepool.cloudConfig }}
cloudConfig: {{ $nodepool.cloudConfig }}
{{- end }}
{{- if $nodepool.clusterId }}
clusterId: {{ $nodepool.clusterId }}
{{- end }}
{{- if $nodepool.clusterType }}
clusterType: {{ $nodepool.clusterType }}
{{- end }}
{{- if $nodepool.cpuCount }}
cpuCount: {{ $nodepool.cpuCount | squote }}
{{- end }}
{{- if $nodepool.diskBus }}
diskBus: {{ $nodepool.diskBus }}
{{- end }}
{{- if $nodepool.diskInfo }}
diskInfo: {{ $nodepool.diskInfo }}
{{- end }}
{{- if $nodepool.diskSize }}
diskSize: {{ $nodepool.diskSize | squote }}
{{- end }}
{{- if $nodepool.imageName }}
imageName: {{ $nodepool.imageName }}
{{- end }}
{{- if $nodepool.keyPairName }}
keyPairName: {{ $nodepool.keyPairName }}
{{- end }}
{{- if $nodepool.kubeconfigContent }}
kubeconfigContent: {{- $nodepool.kubeconfigContent | toYaml | indent 1 }}
{{- end }}
{{- if $nodepool.memorySize }}
memorySize: {{ $nodepool.memorySize | squote }}
{{- end }}
{{- if $nodepool.networkData }}
networkData: {{- $nodepool.networkData | toYaml | indent 1 }}
{{- end }}
{{- if $nodepool.networkInfo }}
networkInfo: {{ $nodepool.networkInfo }}
{{- end }}
{{- if $nodepool.networkModel }}
networkModel: {{ $nodepool.networkModel }}
{{- end }}
{{- if $nodepool.networkName }}
networkName: {{ $nodepool.networkName }}
{{- end }}
{{- if $nodepool.networkType }}
networkType: {{ $nodepool.networkType }}
{{- end }}
{{- if $nodepool.sshPassword }}
sshPassword: {{ $nodepool.sshPassword }}
{{- end }}
{{- if $nodepool.sshPort }}
sshPort: {{ $nodepool.sshPort | squote }}
{{- end }}
{{- if $nodepool.sshPrivateKeyPath }}
sshPrivateKeyPath: {{ $nodepool.sshPrivateKeyPath }}
{{- end }}
{{- if $nodepool.sshUser }}
sshUser: {{ $nodepool.sshUser }}
{{- end }}
{{- if $nodepool.userData }}
userData: {{- $nodepool.userData | toYaml | indent 1 }}
{{- end }}
{{- if $nodepool.vmAffinity }}
vmAffinity: {{ $nodepool.vmAffinity }}
{{- end }}
{{- if $nodepool.vmNamespace }}
vmNamespace: {{ $nodepool.vmNamespace }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,97 @@
{{- $clustername := .Values.cluster.name -}}
{{- if eq .Values.cloudprovider "vsphere" }}
{{- range $index, $nodepool := .Values.nodepools }}
apiVersion: rke-machine-config.cattle.io/v1
kind: VmwarevsphereConfig
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
common:
{{- if $nodepool.labels }}
labels:
{{ toYaml $nodepool.labels | indent 4 }}
{{- end }}
{{- if $nodepool.taints }}
taints:
{{ toYaml $nodepool.taints | indent 4 }}
{{- end }}
{{- if $nodepool.cfgparam }}
cfgparam: {{ $nodepool.cfgparam }}
{{- end }}
cloneFrom: {{ $nodepool.cloneFrom }}
cloudConfig: |-
{{ $nodepool.cloudConfig | indent 2 }}
cloudinit: {{ $nodepool.cloudinit }}
contentLibrary: {{ $nodepool.contentLibrary }}
cpuCount: {{ $nodepool.cpuCount | squote }}
creationType: {{ $nodepool.creationType }}
customAttribute: {{ $nodepool.customAttribute }}
datacenter: {{ $nodepool.datacenter }}
datastore: {{ $nodepool.datastore }}
datastoreCluster: {{ $nodepool.datastoreCluster }}
diskSize: {{ $nodepool.diskSize | squote }}
folder: {{ $nodepool.folder }}
hostsystem: {{ $nodepool.hostsystem }}
memorySize: {{ $nodepool.memorySize | squote }}
network: {{ $nodepool.network }}
pool: {{ $nodepool.pool }}
sshPort: {{ $nodepool.sshPort | squote }}
sshUser: {{ $nodepool.sshUser }}
sshUserGroup: {{ $nodepool.sshUserGroup }}
tag: {{ $nodepool.tag }}
vappIpallocationpolicy: {{ $nodepool.vappIpallocationpolicy }}
vappIpprotocol: {{ $nodepool.vappIpprotocol }}
vappProperty: {{ $nodepool.vappProperty }}
vappTransport: {{ $nodepool.vappTransport }}
vcenter: {{ $nodepool.vcenter }}
vcenterPort: {{ $nodepool.vcenterPort | squote }}
---
{{- end }}
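{{- /* Fallback: render a single VmwarevsphereConfig when .Values.nodepool (singular) is set. */}}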
{{ $nodepool := .Values.nodepool }}
{{- if $nodepool }}
apiVersion: rke-machine-config.cattle.io/v1
kind: VmwarevsphereConfig
metadata:
name: {{ $clustername }}-{{ $nodepool.name }}
namespace: fleet-default
common:
{{- if $nodepool.labels }}
labels:
{{ toYaml $nodepool.labels | indent 4 }}
{{- end }}
{{- if $nodepool.taints }}
taints:
{{ toYaml $nodepool.taints | indent 4 }}
{{- end }}
{{- if $nodepool.cfgparam }}
cfgparam: {{ $nodepool.cfgparam }}
{{- end }}
cloneFrom: {{ $nodepool.cloneFrom }}
cloudConfig: |-
{{ $nodepool.cloudConfig | indent 2 }}
cloudinit: {{ $nodepool.cloudinit }}
contentLibrary: {{ $nodepool.contentLibrary }}
cpuCount: {{ $nodepool.cpuCount | squote }}
creationType: {{ $nodepool.creationType }}
customAttribute: {{ $nodepool.customAttribute }}
datacenter: {{ $nodepool.datacenter }}
datastore: {{ $nodepool.datastore }}
datastoreCluster: {{ $nodepool.datastoreCluster }}
diskSize: {{ $nodepool.diskSize | squote }}
folder: {{ $nodepool.folder }}
hostsystem: {{ $nodepool.hostsystem }}
memorySize: {{ $nodepool.memorySize | squote }}
network: {{ $nodepool.network }}
pool: {{ $nodepool.pool }}
sshPort: {{ $nodepool.sshPort | squote }}
sshUser: {{ $nodepool.sshUser }}
sshUserGroup: {{ $nodepool.sshUserGroup }}
tag: {{ $nodepool.tag }}
vappIpallocationpolicy: {{ $nodepool.vappIpallocationpolicy }}
vappIpprotocol: {{ $nodepool.vappIpprotocol }}
vappProperty: {{ $nodepool.vappProperty }}
vappTransport: {{ $nodepool.vappTransport }}
vcenter: {{ $nodepool.vcenter }}
vcenterPort: {{ $nodepool.vcenterPort | squote }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,433 @@
# amazonec2, azure, digitalocean, elemental, harvester, vsphere, custom
cloudprovider: harvester
# cloud provider credentials
cloudCredentialSecretName: cc-mrklm
# rancher manager url
rancher:
cattle:
url: rancher-mgmt.product.lan
# cluster values
cluster:
name: default-cluster
# labels:
# key: value
config:
kubernetesVersion: v1.33.5+rke2r1
enableNetworkPolicy: true
localClusterAuthEndpoint:
enabled: false
# Pod Security Standard (Replaces PSP)
defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted"
globalConfig:
systemDefaultRegistry: docker.io
cni: canal
docker: false
disable_scheduler: false
disable_cloud_controller: false
disable_kube_proxy: false
etcd_expose_metrics: false
profile: 'cis'
selinux: false
secrets_encryption: true
write_kubeconfig_mode: "0600"
use_service_account_credentials: false
protect_kernel_defaults: true
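# The apiserver flags below disable anonymous auth, wire in the PodSecurity,
# EventRateLimit and DenyServiceExternalIPs admission plugins, and enable
# file-based audit logging; the referenced config files are written by the
# cloud-init userData further down.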
kube_apiserver_arg:
- "service-account-extend-token-expiration=false"
- "anonymous-auth=false"
- "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs"
- "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml"
- "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml"
- "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log"
- "audit-log-maxage=30"
- "audit-log-maxbackup=10"
- "audit-log-maxsize=100"
kubelet_arg:
# Strong Ciphers (CIS 4.2.12)
- "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
# PID Limit (CIS 4.2.13)
- "pod-max-pids=4096"
# Seccomp Default (CIS 4.2.14)
- "seccomp-default=true"
- "protect-kernel-defaults=true"
- "make-iptables-util-chains=true"
upgradeStrategy:
controlPlaneConcurrency: 10%
controlPlaneDrainOptions:
enabled: false
workerConcurrency: 10%
workerDrainOptions:
enabled: false
# node and nodepool(s) values
nodepools:
- name: control-plane-nodes
displayName: cp-nodes
quantity: 1
etcd: true
controlplane: true
worker: false
paused: false
cpuCount: 4
diskSize: 40
imageName: vanderlande/image-qhtpc
memorySize: 8
networkName: vanderlande/vm-lan
sshUser: rancher
vmNamespace: vanderlande
# ---------------------------------------------------------
# Cloud-Init: Creates the Security Files
# ---------------------------------------------------------
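# The &userData anchor below lets the worker pool reuse this cloud-init
# verbatim via "userData: *userData".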
userData: &userData |
#cloud-config
package_update: false
package_upgrade: false
snap:
commands:
00: snap refresh --hold=forever
package_reboot_if_required: true
packages:
- qemu-guest-agent
- yq
- jq
- curl
- wget
bootcmd:
- sysctl -w net.ipv6.conf.all.disable_ipv6=1
- sysctl -w net.ipv6.conf.default.disable_ipv6=1
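# IPv6 stays disabled: these bootcmd sysctls apply early in boot, and the
# drop-in under /etc/sysctl.d below persists them across reboots.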
write_files:
# ----------------------------------------------------------------
# 1. CNI Permission Fix Script & Cron (CIS 1.1.9 Persistence)
# ----------------------------------------------------------------
- path: /usr/local/bin/fix-cni-perms.sh
permissions: '0700'
owner: root:root
content: |
#!/bin/bash
# Wait 60s on boot for RKE2 to write files
[ "$1" == "boot" ] && sleep 60
# Enforce 600 on CNI files (CIS 1.1.9)
if [ -d /etc/cni/net.d ]; then
find /etc/cni/net.d -type f -exec chmod 600 {} \;
fi
if [ -d /var/lib/cni/networks ]; then
find /var/lib/cni/networks -type f -exec chmod 600 {} \;
fi
# Every RKE2 service restart can reset CNI file permissions, so we run
# this script on reboot and daily via cron to maintain CIS compliance.
- path: /etc/cron.d/cis-cni-fix
permissions: '0644'
owner: root:root
content: |
# Run on Reboot (with delay) to fix files created during startup
@reboot root /usr/local/bin/fix-cni-perms.sh boot
# Run once daily at 00:00 to correct any drift
0 0 * * * root /usr/local/bin/fix-cni-perms.sh
# ----------------------------------------------------------------
# 2. RKE2 Admission Config
# ----------------------------------------------------------------
- path: /etc/rancher/rke2/rke2-admission.yaml
permissions: '0600'
owner: root:root
content: |
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
configuration:
apiVersion: pod-security.admission.config.k8s.io/v1beta1
kind: PodSecurityConfiguration
defaults:
enforce: "restricted"
enforce-version: "latest"
audit: "restricted"
audit-version: "latest"
warn: "restricted"
warn-version: "latest"
exemptions:
usernames: []
runtimeClasses: []
namespaces: [compliance-operator-system, kube-system, cis-operator-system, tigera-operator, calico-system, rke2-ingress-nginx, cattle-system, cattle-fleet-system, longhorn-system, cattle-neuvector-system]
- name: EventRateLimit
configuration:
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
- type: Server
qps: 5000
burst: 20000
# ----------------------------------------------------------------
# 3. RKE2 Audit Policy
# ----------------------------------------------------------------
- path: /etc/rancher/rke2/audit-policy.yaml
permissions: '0600'
owner: root:root
content: |
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: None
users: ["system:kube-controller-manager", "system:kube-scheduler", "system:serviceaccount:kube-system:endpoint-controller"]
verbs: ["get", "update"]
resources:
- group: ""
resources: ["endpoints", "services", "services/status"]
- level: None
verbs: ["get"]
resources:
- group: ""
resources: ["nodes", "nodes/status", "pods", "pods/status"]
- level: None
users: ["kube-proxy"]
verbs: ["watch"]
resources:
- group: ""
resources: ["endpoints", "services", "services/status", "configmaps"]
- level: Metadata
resources:
- group: ""
resources: ["secrets", "configmaps"]
- level: RequestResponse
omitStages:
- RequestReceived
# ----------------------------------------------------------------
# 4. Static NetworkPolicies
# ----------------------------------------------------------------
- path: /var/lib/rancher/rke2/server/manifests/cis-network-policy.yaml
permissions: '0600'
owner: root:root
content: |
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-ingress
namespace: default
spec:
podSelector: {}
policyTypes:
- Ingress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-all-metrics
namespace: kube-public
spec:
podSelector: {}
ingress:
- {}
policyTypes:
- Ingress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-all-system
namespace: kube-system
spec:
podSelector: {}
ingress:
- {}
policyTypes:
- Ingress
# ----------------------------------------------------------------
# 5. Service Account Hardening
# ----------------------------------------------------------------
- path: /var/lib/rancher/rke2/server/manifests/cis-sa-config.yaml
permissions: '0600'
owner: root:root
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: default
namespace: default
automountServiceAccountToken: false
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: default
namespace: kube-system
automountServiceAccountToken: false
- path: /var/lib/rancher/rke2/server/manifests/cis-sa-cron.yaml
permissions: '0600'
owner: root:root
content: |
apiVersion: v1
kind: ServiceAccount
metadata: {name: sa-cleaner, namespace: kube-system}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata: {name: sa-cleaner-role}
rules:
- apiGroups: [""]
resources: ["namespaces", "serviceaccounts"]
verbs: ["get", "list", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata: {name: sa-cleaner-binding}
subjects: [{kind: ServiceAccount, name: sa-cleaner, namespace: kube-system}]
roleRef: {kind: ClusterRole, name: sa-cleaner-role, apiGroup: rbac.authorization.k8s.io}
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: sa-cleaner
namespace: kube-system
spec:
schedule: "0 */6 * * *" # Run every 6 hours
jobTemplate:
spec:
template:
spec:
serviceAccountName: sa-cleaner
containers:
- name: cleaner
image: rancher/kubectl:v1.26.0
command:
- /bin/bash
- -c
- |
# Get all namespaces
for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do
# Check if default SA has automount=true (or null)
automount=$(kubectl get sa default -n $ns -o jsonpath='{.automountServiceAccountToken}')
if [ "$automount" != "false" ]; then
echo "Securing default SA in namespace: $ns"
kubectl patch sa default -n $ns -p '{"automountServiceAccountToken": false}'
fi
done
restartPolicy: OnFailure
# ----------------------------------------------------------------
# 6. OS Sysctls Hardening
# ----------------------------------------------------------------
- path: /etc/sysctl.d/60-rke2-cis.conf
permissions: '0644'
content: |
vm.overcommit_memory=1
vm.max_map_count=65530
vm.panic_on_oom=0
fs.inotify.max_user_watches=1048576
fs.inotify.max_user_instances=8192
kernel.panic=10
kernel.panic_on_oops=1
net.ipv4.conf.all.rp_filter=1
net.ipv4.conf.default.rp_filter=1
net.ipv4.conf.all.accept_source_route=0
net.ipv4.conf.default.accept_source_route=0
net.ipv4.conf.all.accept_redirects=0
net.ipv4.conf.default.accept_redirects=0
net.ipv4.conf.all.send_redirects=0
net.ipv4.conf.default.send_redirects=0
net.ipv4.conf.all.log_martians=1
net.ipv4.conf.default.log_martians=1
net.ipv4.icmp_echo_ignore_broadcasts=1
net.ipv4.icmp_ignore_bogus_error_responses=1
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
fs.protected_hardlinks=1
fs.protected_symlinks=1
# ----------------------------------------------------------------
# 7. Environment & Setup Scripts
# ----------------------------------------------------------------
- path: /etc/profile.d/rke2.sh
permissions: '0644'
content: |
export PATH=$PATH:/var/lib/rancher/rke2/bin:/opt/rke2/bin
export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
- path: /root/updates.sh
permissions: '0550'
content: |
#!/bin/bash
export DEBIAN_FRONTEND=noninteractive
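# Hold kernel and header packages so routine upgrades cannot swap the kernel
# underneath a running node; only userspace packages are upgraded below.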
apt-mark hold linux-headers-generic
apt-mark hold linux-headers-virtual
apt-mark hold linux-image-virtual
apt-mark hold linux-virtual
apt-get update
apt-get upgrade -y
apt-get autoremove -y
users:
- name: rancher
gecos: Rancher service account
hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3.
lock_passwd: false
shell: /bin/bash
groups: [users, sudo, docker]
sudo: ALL=(ALL:ALL) ALL
ssh_authorized_keys:
- 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s'
- name: etcd
gecos: "etcd user"
shell: /sbin/nologin
system: true
lock_passwd: true
disable_root: true
ssh_pwauth: true
runcmd:
- systemctl enable --now qemu-guest-agent
- sysctl --system
- /root/updates.sh
# Immediate run of fix script
- /usr/local/bin/fix-cni-perms.sh
final_message: |
VI_CNV_CLOUD_INIT has been applied successfully.
Cluster ready for Rancher!
- name: worker-nodes
displayName: wk-nodes
quantity: 2
etcd: false
controlplane: false
worker: true
paused: false
cpuCount: 2
diskSize: 40
imageName: vanderlande/image-qmx5q
memorySize: 8
networkName: vanderlande/vm-lan
sshUser: rancher
vmNamespace: vanderlande
userData: *userData
addons:
monitoring:
enabled: false
logging:
enabled: false
longhorn:
enabled: false
neuvector:
enabled: false
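
As a quick sanity check of the templates above, the chart can be rendered offline against these values. This is a sketch that assumes the chart source is checked out locally; the release name `cluster` is illustrative.

```bash
# Render all machine-config and cluster manifests locally; Go-template or
# YAML errors in the files above surface here without touching a cluster.
helm template cluster . -n fleet-default -f values.yaml

# Optional: validate the rendered documents against a live API server
# without persisting anything.
helm template cluster . -n fleet-default -f values.yaml | kubectl apply --dry-run=server -f -
```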