# amazonec2, azure, digitalocean, harvester, vsphere, custom
cloudprovider: harvester

# cloud provider credentials
cloudCredentialSecretName: cc-mrklm

# rancher manager url
rancher:
  cattle:
    url: rancher-mgmt.product.lan

# cluster values
cluster:
  name: default-cluster
  # labels:
  #   key: value
  config:
    kubernetesVersion: v1.33.5+rke2r1
    enableNetworkPolicy: true
    localClusterAuthEndpoint:
      enabled: false
    # Pod Security Standard (Replaces PSP)
    defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted"
    globalConfig:
      systemDefaultRegistry: docker.io
      cni: canal
      docker: false
      disable_scheduler: false
      disable_cloud_controller: false
      disable_kube_proxy: false
      etcd_expose_metrics: false
      profile: 'cis'
      selinux: false
      secrets_encryption: true
      write_kubeconfig_mode: 0600
      use_service_account_credentials: false
      protect_kernel_defaults: true
      kube_apiserver_arg:
        - "service-account-extend-token-expiration=false"
        - "anonymous-auth=false"
        - "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs"
        - "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml"
        - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml"
        - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log"
        - "audit-log-maxage=30"
        - "audit-log-maxbackup=10"
        - "audit-log-maxsize=100"
      kubelet_arg:
        # Strong Ciphers (CIS 4.2.12)
        - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
        # PID Limit (CIS 4.2.13)
        - "pod-max-pids=4096"
        # Seccomp Default (CIS 4.2.14)
        - "seccomp-default=true"
        - "protect-kernel-defaults=true"
        - "make-iptables-util-chains=true"
    upgradeStrategy:
      controlPlaneConcurrency: 10%
      controlPlaneDrainOptions:
        enabled: false
      workerConcurrency: 10%
      workerDrainOptions:
        enabled: false

# node and nodepool(s) values
nodepools:
  - name: control-plane-nodes
    displayName: cp-nodes
    quantity: 1
    etcd: true
    controlplane: true
    worker: false
    paused: false
    cpuCount: 4
    diskSize: 40
    imageName: vanderlande/image-qhtpc
    memorySize: 8
    networkName: vanderlande/vm-lan
    sshUser: rancher
    vmNamespace: vanderlande
    # ---------------------------------------------------------
    # Cloud-Init: Creates the Security Files
    # ---------------------------------------------------------
    userData: &userData |
      #cloud-config
      package_update: false
      package_upgrade: false
      snap:
        commands:
          00: snap refresh --hold=forever
      package_reboot_if_required: true
      packages:
        - qemu-guest-agent
        - yq
        - jq
        - curl
        - wget
      bootcmd:
        - sysctl -w net.ipv6.conf.all.disable_ipv6=1
        - sysctl -w net.ipv6.conf.default.disable_ipv6=1
      write_files:
        # ----------------------------------------------------------------
        # 1. CNI Permission Fix Script & Cron (CIS 1.1.9 Persistence)
        # ----------------------------------------------------------------
        - path: /usr/local/bin/fix-cni-perms.sh
          permissions: '0700'
          owner: root:root
          content: |
            #!/bin/bash
            # Wait 60s on boot for RKE2 to write files
            [ "$1" == "boot" ] && sleep 60
            # Enforce 600 on CNI files (CIS 1.1.9)
            if [ -d /etc/cni/net.d ]; then
              find /etc/cni/net.d -type f -exec chmod 600 {} \;
            fi
            if [ -d /var/lib/cni/networks ]; then
              find /var/lib/cni/networks -type f -exec chmod 600 {} \;
            fi
        # Every RKE2 service restart can reset CNI file permissions, so we run
        # this script on reboot and daily via cron to maintain CIS compliance.
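        # Optional spot check (not part of this cloud-init; a manual command run on a
        # node after provisioning is assumed): list the resulting CNI file modes with
        #   find /etc/cni/net.d -type f -exec stat -c '%a %n' {} \;
        # Every file listed should report mode 600.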
        - path: /etc/cron.d/cis-cni-fix
          permissions: '0644'
          owner: root:root
          content: |
            # Run on Reboot (with delay) to fix files created during startup
            @reboot root /usr/local/bin/fix-cni-perms.sh boot
            # Run once daily at 00:00 to correct any drift
            0 0 * * * root /usr/local/bin/fix-cni-perms.sh
        # ----------------------------------------------------------------
        # 2. RKE2 Admission Config
        # ----------------------------------------------------------------
        - path: /etc/rancher/rke2/rke2-admission.yaml
          permissions: '0600'
          owner: root:root
          content: |
            apiVersion: apiserver.config.k8s.io/v1
            kind: AdmissionConfiguration
            plugins:
              - name: PodSecurity
                configuration:
                  apiVersion: pod-security.admission.config.k8s.io/v1beta1
                  kind: PodSecurityConfiguration
                  defaults:
                    enforce: "restricted"
                    enforce-version: "latest"
                    audit: "restricted"
                    audit-version: "latest"
                    warn: "restricted"
                    warn-version: "latest"
                  exemptions:
                    usernames: []
                    runtimeClasses: []
                    namespaces: [compliance-operator-system, kube-system, cis-operator-system, tigera-operator, calico-system, rke2-ingress-nginx, cattle-system, cattle-fleet-system, longhorn-system, cattle-neuvector-system]
              - name: EventRateLimit
                configuration:
                  apiVersion: eventratelimit.admission.k8s.io/v1alpha1
                  kind: Configuration
                  limits:
                    - type: Server
                      qps: 5000
                      burst: 20000
        # ----------------------------------------------------------------
        # 3. RKE2 Audit Policy
        # ----------------------------------------------------------------
        - path: /etc/rancher/rke2/audit-policy.yaml
          permissions: '0600'
          owner: root:root
          content: |
            apiVersion: audit.k8s.io/v1
            kind: Policy
            rules:
              - level: None
                users: ["system:kube-controller-manager", "system:kube-scheduler", "system:serviceaccount:kube-system:endpoint-controller"]
                verbs: ["get", "update"]
                resources:
                  - group: ""
                    resources: ["endpoints", "services", "services/status"]
              - level: None
                verbs: ["get"]
                resources:
                  - group: ""
                    resources: ["nodes", "nodes/status", "pods", "pods/status"]
              - level: None
                users: ["kube-proxy"]
                verbs: ["watch"]
                resources:
                  - group: ""
                    resources: ["endpoints", "services", "services/status", "configmaps"]
              - level: Metadata
                resources:
                  - group: ""
                    resources: ["secrets", "configmaps"]
              - level: RequestResponse
                omitStages:
                  - RequestReceived
        # ----------------------------------------------------------------
        # 4. Static NetworkPolicies
        # ----------------------------------------------------------------
        - path: /var/lib/rancher/rke2/server/manifests/cis-network-policy.yaml
          permissions: '0600'
          owner: root:root
          content: |
            apiVersion: networking.k8s.io/v1
            kind: NetworkPolicy
            metadata:
              name: default-deny-ingress
              namespace: default
            spec:
              podSelector: {}
              policyTypes:
                - Ingress
            ---
            apiVersion: networking.k8s.io/v1
            kind: NetworkPolicy
            metadata:
              name: allow-all-metrics
              namespace: kube-public
            spec:
              podSelector: {}
              ingress:
                - {}
              policyTypes:
                - Ingress
            ---
            apiVersion: networking.k8s.io/v1
            kind: NetworkPolicy
            metadata:
              name: allow-all-system
              namespace: kube-system
            spec:
              podSelector: {}
              ingress:
                - {}
              policyTypes:
                - Ingress
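        # Verification sketch (assumption: run from a workstation with cluster access,
        # not part of this cloud-init): RKE2 auto-applies manifests dropped under
        # /var/lib/rancher/rke2/server/manifests, so the policies above should appear in
        #   kubectl get networkpolicy -A
        # once the first server node is up.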
        # ----------------------------------------------------------------
        # 5. Service Account Hardening
        # ----------------------------------------------------------------
        - path: /var/lib/rancher/rke2/server/manifests/cis-sa-config.yaml
          permissions: '0600'
          owner: root:root
          content: |
            apiVersion: v1
            kind: ServiceAccount
            metadata:
              name: default
              namespace: default
            automountServiceAccountToken: false
            ---
            apiVersion: v1
            kind: ServiceAccount
            metadata:
              name: default
              namespace: kube-system
            automountServiceAccountToken: false
        - path: /var/lib/rancher/rke2/server/manifests/cis-sa-cron.yaml
          permissions: '0600'
          owner: root:root
          content: |
            apiVersion: v1
            kind: ServiceAccount
            metadata: {name: sa-cleaner, namespace: kube-system}
            ---
            apiVersion: rbac.authorization.k8s.io/v1
            kind: ClusterRole
            metadata: {name: sa-cleaner-role}
            rules:
              - apiGroups: [""]
                resources: ["namespaces", "serviceaccounts"]
                verbs: ["get", "list", "patch"]
            ---
            apiVersion: rbac.authorization.k8s.io/v1
            kind: ClusterRoleBinding
            metadata: {name: sa-cleaner-binding}
            subjects: [{kind: ServiceAccount, name: sa-cleaner, namespace: kube-system}]
            roleRef: {kind: ClusterRole, name: sa-cleaner-role, apiGroup: rbac.authorization.k8s.io}
            ---
            apiVersion: batch/v1
            kind: CronJob
            metadata:
              name: sa-cleaner
              namespace: kube-system
            spec:
              schedule: "0 */6 * * *" # Run every 6 hours
              jobTemplate:
                spec:
                  template:
                    spec:
                      serviceAccountName: sa-cleaner
                      containers:
                        - name: cleaner
                          image: rancher/kubectl:v1.26.0
                          command:
                            - /bin/bash
                            - -c
                            - |
                              # Get all namespaces
                              for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do
                                # Check if default SA has automount=true (or null)
                                automount=$(kubectl get sa default -n $ns -o jsonpath='{.automountServiceAccountToken}')
                                if [ "$automount" != "false" ]; then
                                  echo "Securing default SA in namespace: $ns"
                                  kubectl patch sa default -n $ns -p '{"automountServiceAccountToken": false}'
                                fi
                              done
                      restartPolicy: OnFailure
        # ----------------------------------------------------------------
        # 6. OS Sysctls Hardening
        # ----------------------------------------------------------------
        - path: /etc/sysctl.d/60-rke2-cis.conf
          permissions: '0644'
          content: |
            vm.overcommit_memory=1
            vm.max_map_count=65530
            vm.panic_on_oom=0
            fs.inotify.max_user_watches=1048576
            fs.inotify.max_user_instances=8192
            kernel.panic=10
            kernel.panic_on_oops=1
            net.ipv4.conf.all.rp_filter=1
            net.ipv4.conf.default.rp_filter=1
            net.ipv4.conf.all.accept_source_route=0
            net.ipv4.conf.default.accept_source_route=0
            net.ipv4.conf.all.accept_redirects=0
            net.ipv4.conf.default.accept_redirects=0
            net.ipv4.conf.all.send_redirects=0
            net.ipv4.conf.default.send_redirects=0
            net.ipv4.conf.all.log_martians=1
            net.ipv4.conf.default.log_martians=1
            net.ipv4.icmp_echo_ignore_broadcasts=1
            net.ipv4.icmp_ignore_bogus_error_responses=1
            net.ipv6.conf.all.disable_ipv6=1
            net.ipv6.conf.default.disable_ipv6=1
            fs.protected_hardlinks=1
            fs.protected_symlinks=1
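        # Verification sketch (assumption: run manually on a node; the runcmd section
        # below already loads this file via `sysctl --system`): check a sample of the
        # hardened values with
        #   sysctl net.ipv4.conf.all.rp_filter net.ipv4.conf.all.send_redirects kernel.panic_on_oops
        # which should print 1, 0 and 1 respectively.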
        # ----------------------------------------------------------------
        # 7. Environment & Setup Scripts
        # ----------------------------------------------------------------
        - path: /etc/profile.d/rke2.sh
          permissions: '0644'
          content: |
            export PATH=$PATH:/var/lib/rancher/rke2/bin:/opt/rke2/bin
            export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
        - path: /root/updates.sh
          permissions: '0550'
          content: |
            #!/bin/bash
            export DEBIAN_FRONTEND=noninteractive
            apt-mark hold linux-headers-generic
            apt-mark hold linux-headers-virtual
            apt-mark hold linux-image-virtual
            apt-mark hold linux-virtual
            apt-get update
            apt-get upgrade -y
            apt-get autoremove -y
      users:
        - name: rancher
          gecos: Rancher service account
          hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3.
          lock_passwd: false
          shell: /bin/bash
          groups: [users, sudo, docker]
          sudo: ALL=(ALL:ALL) ALL
          ssh_authorized_keys:
            - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s'
        - name: etcd
          gecos: "etcd user"
          shell: /sbin/nologin
          system: true
          lock_passwd: true
      disable_root: true
      ssh_pwauth: true
      runcmd:
        - systemctl enable --now qemu-guest-agent
        - sysctl --system
        - /root/updates.sh
        # Immediate run of fix script
        - /usr/local/bin/fix-cni-perms.sh
      final_message: |
        VI_CNV_CLOUD_INIT has been applied successfully.
        Cluster ready for Rancher!
  - name: worker-nodes
    displayName: wk-nodes
    quantity: 2
    etcd: false
    controlplane: false
    worker: true
    paused: false
    cpuCount: 2
    diskSize: 40
    imageName: vanderlande/image-qmx5q
    memorySize: 8
    networkName: vanderlande/vm-lan
    sshUser: rancher
    vmNamespace: vanderlande
    userData: *userData

addons:
  monitoring:
    enabled: false
  logging:
    enabled: false
  longhorn:
    enabled: false
  neuvector:
    enabled: false
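# Illustrative usage (assumption: this file is the values.yaml of a cluster-template
# Helm chart; the chart path, release name and target namespace are placeholders):
#   helm upgrade --install default-cluster ./cluster-template -f values.yaml -n fleet-default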