diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0df08ce --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +.vscode +ignore +.version +*.log +*.tmp \ No newline at end of file diff --git a/.releaserc b/.releaserc new file mode 100644 index 0000000..a19b60b --- /dev/null +++ b/.releaserc @@ -0,0 +1,36 @@ +branches: + - name: main + channel: stable + + - name: development + prerelease: rc + channel: beta + + +plugins: + - - "@semantic-release/commit-analyzer" + - releaseRules: + - type: backport + release: patch + + - - "@semantic-release/release-notes-generator" + - presetConfig: + types: + - type: backport + section: Backports + - type: feat + section: Features + - type: fix + section: Bug Fixes + + + - - "@semantic-release/changelog" + - changelogFile: CHANGELOG.md + + - - "@semantic-release/git" + - assets: + - CHANGELOG.md + message: "chore(release): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}" + +preset: conventionalcommits +ci: false \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..04514c6 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,49 @@ +## [1.0.0-rc.4](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/compare/v1.0.0-rc.3...v1.0.0-rc.4) (2026-01-07) + + +### Features + +* TPINF-871 - README.md and CAPI deployment added. ([792907b](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/792907b901dc9d407429ac00fa4f885864fea628)) +* TPINF-1346 - created RKE2 CloudInit templates for CP nodes ([c04cd9a](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/c04cd9a902e1cbf3b64d0b91573584f87ef3c17e)) +* TPINF-1346 - CronJob to patch default SA in namespaces ([7687bca](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/7687bca1be97ebc3794bf492321ddb6788e57f45)) +* TPINF-1346 - patch default sa to not mount token ([d5b880d](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/d5b880dd53a9f4754ed75f6c654ae9de0542edce)) +* TPINF-871 - Fleet DHCP deployment changed to dhcp ([e02ccc7](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/e02ccc7cbb42af3ed5b8680494ef5609c23ce097)) +* TPINF-871 - Added CAPI attempts to boot Rancher in Harvester ([9b5d74c](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/9b5d74cf3811fb0e165231d573af070202ae37db)) +* TPINF-871 - Attempt to upgrade vcluster chart to v0.30.1 ([520f23b](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/520f23b863a6eb85a8b5b17f468c05a23c177c14)) +* TPINF-871 - Default namespace commented in LB definition ([29ec3f0](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/29ec3f0377329aa63423029f63ea5a58bff9e2ab)) +* TPINF-871 - Helm chart config split and Fleet update ([618d16f](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/618d16fefccc996660c1b689bf13fcf30bb4c8dc)) +* TPINF-871 - make Helm deployment namespace configurable ([f125160](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/f125160031c538cd39d35dab78471ca61e7f04cd)) +* TPINF-871 - RGS Helm chart updated to RnD environment ([1280bef](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/1280befc775aff54e97a82f8da4f4202d5c534ad)) + +## [1.0.0-rc.3](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/compare/v1.0.0-rc.2...v1.0.0-rc.3) (2025-12-16) + + +### Features + +* TPINF-1093 - Template and cloudinit for Ubuntu 24.04 ([93ca097](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/93ca097226c0c87a2c8c382535f40a2be81b7383)) + +## 
[1.0.0-rc.2](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/compare/v1.0.0-rc.1...v1.0.0-rc.2) (2025-12-03) + + +### Features + +* TPINF-1093 - Virtualization baselines ([b989a59](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/b989a5954fc97c4d1e7838e7775753ecaba7918f)) + +## 1.0.0-rc.1 (2025-10-08) + + +### Features + +* Initial commit of CI/Docs ([95cc946](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/95cc946ed13fe45a30ed3bdce2a9368124d9e5fc)) + +## 1.0.0-rc.1 (2025-10-07) + + +### Features + +* Semantic RC release with document integration ([f9ea626](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/f9ea6266acf2cabe57b76cfb88ee7a39891bd31d)) + + +### Bug Fixes + +* Dummy commit to trigger semantic-release ([c0b12ed](https://devstash.vanderlande.com/scm/ittp/as-vi-cnv/commit/c0b12ed052eb1baf27b89454c1265857e354899a)) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..6138a2c --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,3 @@ +# Contributing Guidelines + +[Contribution Wiki](https://devcolla.vanderlande.com/display/ITTP/How+to+contribute) diff --git a/README.md b/README.md index f966f72..cf67d0c 100644 --- a/README.md +++ b/README.md @@ -1 +1,7 @@ - \ No newline at end of file + + + + +# {{ release_version }} - TEST + +TEST123 diff --git a/RELEASENOTES_IM-CNV-v100.md b/RELEASENOTES_IM-CNV-v100.md new file mode 100755 index 0000000..82216ed --- /dev/null +++ b/RELEASENOTES_IM-CNV-v100.md @@ -0,0 +1,67 @@ +# 🚀 Release Notes - IM Cloud Native Virtualization v1.0.0 (im-cnv) + +**Release Date:** 2025-12-01 + +--- + +## 📘 Summary +IM Cloud Native Virtualization v1.0.0 is based on SUSE Virtualization (previously known as Harvester) and offers virtualization as well as container orchestration. + +Within Vanderlande, this is the practical successor to Container Platform (CP2/CP3) and VI_IT_VIRTUALIZATION. + +The capabilities of IM-CNV are effectively the same: it runs any modern operating system in virtual machines, and its Kubernetes orchestration of workload clusters allows all of Vanderlande's existing Modules to be run. +There are, however, key differences in the underlying technology and implementation specifics, which are highlighted below: + +- ### Persistent Storage location (*inside* vs *outside* Kubernetes cluster) + - *CP2/CP3*: Persistent storage (provided through Longhorn) was stored *within* Kubernetes clusters on designated "worker-storage" nodes. + - *IM-CNV*: Respective volumes and replicas (still provided through Longhorn - _though_ seemingly with different `StorageClass` names) are now stored directly on the Hypervisor nodes. + + **There are *no* dedicated storage node pools; cluster nodes can now be reprovisioned _without_ extensive wait periods for replication to finish.** +- ### Services of type `LoadBalancer` managed by different controllers + - *CP2/CP3*: Services of type `LoadBalancer` were managed by **MetalLB**, which exposed MetalLB-specific annotations for configuration. + - *IM-CNV*: Services of type `LoadBalancer` are managed by **Harvester's integrated Cloud Controller Manager**. 
+ + **The two load balancers differ in the OSI layers they operate at (MetalLB: Layers 2 and 3, Harvester CCM: Layer 4); however, feature parity is maintained for common use cases within Vanderlande.** + ***NOTE:** IP-address pinning is currently not supported through annotations and requires explicit administrator intervention.* +- ### Virtual Machine templating + - *VI_IT_VIRTUALIZATION*: Virtual machine template export & import was supported in the `ova`/`ovf` formats; virtual machine disks were stored as `vmdk` files. + - *IM-CNV*: No virtual machine template export & import functionality; virtual machines can be created from disk images in the `qcow2` and `raw` formats. + + **Harvester includes an addon that connects to a vCenter instance and imports virtual machines directly, negating the need to export & import with an intermediate file format.** + + +## 🔗 Related Links +- ~~**Release Bundle**: [Link](#TBD)~~ +- ~~**Changelog**: [Link](#TBD)~~ +- **Jira Release**: [Link](https://devtrack.vanderlande.com/projects/TPINF/versions/144780) +- ~~**SBOM**: [Link](#TBD)~~ +- **Test Results**: [Link](https://devtrack.vanderlande.com/secure/attachment/1179276/VI_IT_VI_IT_CNV-1_0_0.pdf) + +## 🧩 Compatibility Matrix +- Supported Kubernetes Versions: + - **RKE2**: 1.31, 1.32, 1.33 + - **K3S** _(experimental)_: 1.31, 1.32, 1.33 + +- Supported Guest Operating System Versions: + - **SUSE Linux Enterprise Server**: 15 SP6, 15 SP7 + - **SUSE Linux Enterprise Micro**: 6.0, 6.1 + - **Ubuntu**: 22.04, 24.04 + - **RHEL**: 9, 10 + - **Windows**: up to and including Windows 11 and Windows Server 2025 + +- Bundled Component Versions: + - **Rancher**: v2.12 + - **Longhorn**: v1.9.1 + +## ⚠️ Breaking Changes +Refer to the key differences highlighted above to understand how migration to IM-CNV might affect your workloads; the Service sketch below illustrates the load-balancer change. 
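+
+The sketch below is a minimal example rather than a validated configuration: it shows how a `LoadBalancer` Service is annotated under the Harvester CCM. The `cloudprovider.harvesterhci.io/ipam` annotation name follows upstream Harvester documentation and should be verified against the deployed release; the Service name and ports are placeholders.
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-app                                # placeholder workload name
+  annotations:
+    # Harvester CCM IPAM mode: "dhcp" or "pool" (verify support on your release)
+    cloudprovider.harvesterhci.io/ipam: dhcp
+spec:
+  type: LoadBalancer                          # formerly handled by MetalLB on CP2/CP3
+  selector:
+    app: my-app
+  ports:
+    - port: 80
+      targetPort: 8080
+```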
+ +## 🔄 Migration / Upgrade Steps +_There is no upgrade path from any version of Container Platform; Kubernetes clusters and their respective workloads need to be reprovisioned._ + +## 📦 Delivery Artifacts +- **Installation Files:** + - [Installation Manual](https://vanderlande.sharepoint.com/:w:/r/sites/T_Technolo-17-TeamAppStackEdgeComputing/Shared%20Documents/Team%20Application%20Stack/Cloud%20Native%20Virtualization%20Installation%20Manual.docx?d=w143df45494e7454b9c00247ac76c3dc3&csf=1&web=1&e=tnswwo) + - [Operator Manual](https://vanderlande.sharepoint.com/:w:/r/sites/T_Technolo-17-TeamAppStackEdgeComputing/Shared%20Documents/Team%20Application%20Stack/Cloud%20Native%20Virtualization%20Operator%20Manual.docx?d=w5b544952345b40d9ba0e5db01a3f703c&csf=1&web=1&e=XTXc4M) + +--- diff --git a/as-vi-cnv-rig-operator@e1c9e3e68e8.zip b/as-vi-cnv-rig-operator@e1c9e3e68e8.zip new file mode 100755 index 0000000..ab19ab7 Binary files /dev/null and b/as-vi-cnv-rig-operator@e1c9e3e68e8.zip differ diff --git a/bamboo-specs/bamboo.yaml b/bamboo-specs/bamboo.yaml new file mode 100644 index 0000000..6dccd67 --- /dev/null +++ b/bamboo-specs/bamboo.yaml @@ -0,0 +1,99 @@ +--- +version: 2 +## Plan properties +plan: + project-key: ITTP + key: ASVICNV + name: AS-VI-Cloud Native Virtualization + +branches: + delete: + after-deleted-days: 2 + after-inactive-days: never + +other: + concurrent-build-plugin: 5 + all-other-apps: + buildExpiryConfig: + duration: 5 + enabled: true + expiryTypeResult: true + maximumBuildsToKeep: 5 + period: days + +## variables used in the jobs +variables: + ## OVA build variables + hostvolume: /data/bamboo/${bamboo.capability.AGENT_ID}/xml-data/build-dir/${bamboo.buildKey} + containerregistry_release: devstore.vanderlande.com:6555 + containerregistry_virtual: devstore.vanderlande.com:6559 + container_agent: ${bamboo.containerregistry_release}/com/vanderlande/conpl/bamboo-agent-extended:1.5.0-linuxbase + container_semrel: ${bamboo.containerregistry_virtual}/com/vanderlande/conpl/bamboo-semantic-release:v23.0.2 + container_mark: kovetskiy/mark:12.2.0 + ## SemRel variables + httpsaccesskey_secret: BAMSCRT@0@0@FyHDe+gBcijblOU8jpGcEEwxpYBWQ0cl2NxEgACy5MidjyRlcZKAS4YXC/nLS8sOXZKHKBF3Siyeh2fdnAjOeg== + ## confluence documentation patch + confluence_url: https://devcolla.vanderlande.com + confluence_username: srv.conpldocs + confluence_password: BAMSCRT@0@0@UxPtDd1NpJ/YoYuImly6ZLqS62SCxPQK5uonPqkfF94= + confluence_space: ITTP + + +stages: + - Prepare: + - import-variables + - semantic-release-dryrun + - Validate: + - docs-dryrun + - Documentation: + - docs-changesonly + +import-variables: !include "prepare/import-variables.yaml" +semantic-release-dryrun: !include "prepare/semantic-release-dryrun.yaml" +docs-dryrun: !include "validate/docs-dryrun.yaml" +docs-changesonly: !include "validate/docs-changesonly.yaml" + +branch-overrides: + - docs-.*: + stages: + - Prepare: + - import-variables + - Documentation: + - docs-dryrun + - docs-changesonly + docs-changesonly: !include "validate/docs-changesonly.yaml" + import-variables: !include "prepare/import-variables.yaml" + docs-dryrun: !include "validate/docs-dryrun.yaml" + + - development: + stages: + - Prepare: + - import-variables + - semantic-release-dryrun + - Validate: + - docs-dryrun + - Release: + - semantic-release + - Documentation: + - docs-changesonly + import-variables: !include "prepare/import-variables.yaml" + semantic-release-dryrun: !include "prepare/semantic-release-dryrun.yaml" + docs-dryrun: !include "validate/docs-dryrun.yaml" + 
docs-changesonly: !include "validate/docs-changesonly.yaml" + semantic-release: !include "release/semantic-release.yaml" + + - main|^.*.x: + stages: + - Prepare: + - import-variables + - semantic-release-dryrun + - Release: + - semantic-release + - Documentation: + - docs + import-variables: !include "prepare/import-variables.yaml" + semantic-release-dryrun: !include "prepare/semantic-release-dryrun.yaml" + docs: !include "validate/docs.yaml" + semantic-release: !include "release/semantic-release.yaml" + + diff --git a/bamboo-specs/prepare/import-variables.yaml b/bamboo-specs/prepare/import-variables.yaml new file mode 100644 index 0000000..2bbb35b --- /dev/null +++ b/bamboo-specs/prepare/import-variables.yaml @@ -0,0 +1,40 @@ +tasks: + - script: | + #!/bin/bash + set -ex + + case ${bamboo_planRepository_branch} in + main) + USER=${bamboo.release_deployer_username} + PASSWORD=${bamboo.release_deployer_password} + REPOSITORY="nlveg-gen-release-local-01" + ;; + *.x) + USER=${bamboo.release_deployer_username} + PASSWORD=${bamboo.release_deployer_password} + REPOSITORY="nlveg-gen-release-local-01" + ;; + + *) + USER=${bamboo.snapshot_deployer_username} + PASSWORD=${bamboo.snapshot_deployer_password} + REPOSITORY="nlveg-gen-devteam-local-01" + ;; + esac + + + # Inject custom variables into inject-variables source file (inception) + # (Bamboo does not allow proper variable substitution operations) + echo -e "\nvmname=conpl_${bamboo.buildNumber}_$(date +"%m-%d-%Y")_$(echo "${bamboo.planRepository.revision}" | head -c7 -z)" >> pipeline.parameters + echo "artifactory_username=${USER}" >> pipeline.parameters + echo "artifactory_password=${PASSWORD}" >> pipeline.parameters + echo "artifactory_repository=${REPOSITORY}" >> pipeline.parameters + echo "var_file=${VAR_FILE}" >> pipeline.parameters + + - inject-variables: + file: pipeline.parameters + scope: RESULT +other: + clean-working-dir: true +requirements: + - AGENT_TYPE: Linux_Base_Agent diff --git a/bamboo-specs/prepare/semantic-release-dryrun.yaml b/bamboo-specs/prepare/semantic-release-dryrun.yaml new file mode 100644 index 0000000..00ccb2c --- /dev/null +++ b/bamboo-specs/prepare/semantic-release-dryrun.yaml @@ -0,0 +1,55 @@ +tasks: + - checkout: + force-clean-build: 'true' + - script: | + #!/bin/bash + set -ex + + docker run --rm --user 555:555 -v ${bamboo.hostvolume}:/code -w /code \ + ${bamboo.container_semrel} \ + npx semantic-release \ + --dry-run --repository-url https://${bamboo.httpsaccesskey_secret}@devstash.vanderlande.com/scm/ittp/as-vi-cnv.git \ + --verifyRelease @semantic-release/exec \ + --verifyReleaseCmd 'echo "${nextRelease.version}" > .version' + + # Function to determine the version tag + get_version_tag() { + if [ -f .version ]; then + echo "$(cat .version)" + else + echo "$(git describe --abbrev=0 --tags | awk '{gsub("^v", ""); print}')" + fi + } + + # Function to determine the commit hash + get_commit_hash() { + echo "$(git log -1 --pretty=format:%h)" + } + + # Get version tag and commit hash + version_tag=$(get_version_tag) + commit_hash=$(get_commit_hash) + override=$(git log -1 --pretty=format:%s | grep -oP '\[docs-override v\K[^\]]+') || true + + # Determine gtag and template_suffix based on branch + if [[ "${bamboo_planRepository_branch}" == "main" || "${bamboo_planRepository_branch}" =~ ^[0-9]+\.[0-9]+\.x$ ]]; then + template_suffix="${version_tag}" + elif [[ "${bamboo_planRepository_branch}" == docs-* && -n $override ]]; then + version_tag="${override}" + template_suffix="${override}" + else + 
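+ # All other branches: combine the predicted next version with the short commit hash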
template_suffix="${version_tag}-${commit_hash}" + fi + + # Write to pipeline.parameters + echo -e "\ngtag=${version_tag}" >> pipeline.parameters + echo -e "\ntemplate_suffix=${template_suffix}" >> pipeline.parameters + + - inject-variables: + file: pipeline.parameters + scope: RESULT +other: + clean-working-dir: true +requirements: + - system.docker.executable + - AGENT_TYPE: Linux_Base_Agent \ No newline at end of file diff --git a/bamboo-specs/release/semantic-release.yaml b/bamboo-specs/release/semantic-release.yaml new file mode 100644 index 0000000..66919cf --- /dev/null +++ b/bamboo-specs/release/semantic-release.yaml @@ -0,0 +1,14 @@ +tasks: + - checkout: + force-clean-build: 'true' + - script: | + set -x + docker run --rm --user 555:555 -v ${bamboo.hostvolume}:/code -w /code \ + ${bamboo.container_semrel} \ + npx semantic-release \ + --repository-url https://${bamboo.httpsaccesskey_secret}@devstash.vanderlande.com/scm/ittp/as-vi-cnv.git +other: + clean-working-dir: true +requirements: + - system.docker.executable + - AGENT_TYPE: Linux_Base_Agent diff --git a/bamboo-specs/validate/ansible-lint.yaml b/bamboo-specs/validate/ansible-lint.yaml new file mode 100644 index 0000000..d63169d --- /dev/null +++ b/bamboo-specs/validate/ansible-lint.yaml @@ -0,0 +1,22 @@ +--- +## Molecule deploy and test +tasks: + - script: | + #!/bin/bash + set -ex + + + # Run ansible-lint for the first set of roles (cp/lifecycle) + if ! docker run --rm --volume ${bamboo.hostvolume}:/data \ + --workdir=/data \ + ${bamboo.container_molecule} \ + ansible-lint -c .ansible-lint.yml; then + echo "ERROR: Ansible Lint failed. Check the output for details." + exit 1 # Stop the script immediately + fi + echo "Ansible Lint successful for all ansible/collections/ansible_collections!" +other: + clean-working-dir: true +requirements: + - system.docker.executable + - AGENT_TYPE: Linux_Base_Agent diff --git a/bamboo-specs/validate/artifactory-ping.yaml b/bamboo-specs/validate/artifactory-ping.yaml new file mode 100644 index 0000000..d76a1b6 --- /dev/null +++ b/bamboo-specs/validate/artifactory-ping.yaml @@ -0,0 +1,10 @@ +tasks: + - script: | + #!/bin/bash + set -ex + + docker run --rm ${bamboo.container_jfrog} jfrog rt ping --user ${bamboo.snapshot_deployer_username} --password ${bamboo.snapshot_deployer_password} --url https://devstore.vanderlande.com/artifactory +other: + clean-working-dir: true +requirements: + - AGENT_TYPE: Linux_Base_Agent diff --git a/bamboo-specs/validate/docs-changesonly.yaml b/bamboo-specs/validate/docs-changesonly.yaml new file mode 100644 index 0000000..6bb5b90 --- /dev/null +++ b/bamboo-specs/validate/docs-changesonly.yaml @@ -0,0 +1,75 @@ +tasks: + - checkout: + force-clean-build: 'true' + + - script: | + #!/bin/bash + set -euxo pipefail + + # Ensure there's at least one previous commit + if git rev-parse HEAD~1 >/dev/null 2>&1; then + # Collect changed *.md files under docs/ (ignore deletions) + CHANGED_MD_FILES=$(git diff --name-status HEAD~1 HEAD | \ + awk '$1 != "D" {print $2}' | grep '^docs/.*\.md$' || true) + else + echo "No previous commit to compare against. Skipping update." + exit 0 + fi + + if [[ -z "${CHANGED_MD_FILES}" ]]; then + echo "No relevant markdown files changed under docs/. Skipping Confluence update." 
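+ # Skipping the update is a successful outcome; exit 0 so the build is not marked failed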
+ exit 0 + fi + + # Parse minor version from semantic version + MINOR_VERSION=$(echo "${bamboo.inject.gtag}" | grep -Eo "^[0-9]+\.[0-9]+") + + # Inject version numbers into documentation + sed -i "s/{{ release_version }}/${bamboo.inject.gtag}/g;s/{{ minor_version }}/${MINOR_VERSION}/g" README.md + sed -i "s/{{ release_version }}/${bamboo.inject.gtag}/g;s/{{ minor_version }}/${MINOR_VERSION}/g" docs/*.md + + # Create temporary folder + mkdir -p ./vi_certs + + # Download latest Vanderlande CA certificates + curl https://pki.vanderlande.com/pki/VanderlandeRootCA.crt -o - | \ + openssl x509 -inform DER -out ./vi_certs/VanderlandeRootCA.crt + curl https://pki.vanderlande.com/pki/VanderlandeSubordinateCA-Internal.crt \ + -o ./vi_certs/VanderlandeSubordinateCA-Internal.crt + + echo "---" + echo "Starting Confluence update for the following files:" + echo "${CHANGED_MD_FILES}" + echo "---" + + # Since -f only accepts one file, we must loop through the list of changed files. + for file in ${CHANGED_MD_FILES} + do + echo "Processing file: ${file}" + + # Run a separate docker command for each file + docker run --rm --name "confluence-docs-update" \ + -v "${bamboo.hostvolume}:/code" \ + -v "${bamboo.hostvolume}/vi_certs:/usr/local/share/ca-certificates" \ + -w /code \ + "${bamboo.container_mark}" \ + /bin/bash -c "\ + update-ca-certificates && \ + mark -u '${bamboo.confluence_username}' \ + -p '${bamboo.confluence_password}' \ + -b '${bamboo.confluence_url}' \ + --ci --changes-only \ + --title-from-h1 \ + --space ${bamboo.confluence_space} \ + --parents 'IT Technology Platform/Team Devcolla'\''s/Application Stack/Harvester Cloud Native Virtualization' \ + -f '${file}'" + echo "Finished processing ${file}." + echo "---" + done + +other: + clean-working-dir: true + +requirements: + - system.docker.executable + - AGENT_TYPE: Linux_Base_Agent \ No newline at end of file diff --git a/bamboo-specs/validate/docs-dryrun.yaml b/bamboo-specs/validate/docs-dryrun.yaml new file mode 100644 index 0000000..055a7e4 --- /dev/null +++ b/bamboo-specs/validate/docs-dryrun.yaml @@ -0,0 +1,63 @@ +tasks: + - checkout: + force-clean-build: 'true' + - script: | + #!/bin/bash + set -x + + # Parse minor version from semantic version + MINOR_VERSION=$(echo "${bamboo.inject.gtag}" | grep -Eo "^[0-9]+\.[0-9]+") + + # Inject version numbers into documentation + sed -i "s/{{ release_version }}/${bamboo.inject.gtag}/g;s/{{ minor_version }}/${MINOR_VERSION}/g" README.md + sed -i "s/{{ release_version }}/${bamboo.inject.gtag}/g;s/{{ minor_version }}/${MINOR_VERSION}/g" docs/*.md + + # Create temporary folder + mkdir -p ./vi_certs + + # Download latest Vanderlande certificate authority certificates + curl https://pki.vanderlande.com/pki/VanderlandeRootCA.crt -o - | openssl x509 -inform DER -out ./vi_certs/VanderlandeRootCA.crt + curl https://pki.vanderlande.com/pki/VanderlandeSubordinateCA-Internal.crt -o ./vi_certs/VanderlandeSubordinateCA-Internal.crt + + # Update README markdown file + docker run --rm --name confluence-docs-update \ + -v "${bamboo.hostvolume}:/code" \ + -v "${bamboo.hostvolume}/vi_certs:/usr/local/share/ca-certificates" \ + -w /code \ + "${bamboo.container_mark}" \ + /bin/bash -c "\ + update-ca-certificates && \ + mark \ + -u '${bamboo.confluence_username}' \ + -p '${bamboo.confluence_password}' \ + -b '${bamboo.confluence_url}' \ + --title-from-h1 \ + --space ${bamboo.confluence_space} \ + --parents 'IT Technology Platform/Team Devcolla'\''s/Application Stack/Harvester Cloud Native Virtualization' \ + --dry-run \ + 
-f './README.md' || exit 1" + + # Update all markdown files in docs/ + docker run --rm --name confluence-docs-update \ + -v "${bamboo.hostvolume}:/code" \ + -v "${bamboo.hostvolume}/vi_certs:/usr/local/share/ca-certificates" \ + -w /code \ + "${bamboo.container_mark}" \ + /bin/bash -c "\ + update-ca-certificates && \ + mark \ + -u '${bamboo.confluence_username}' \ + -p '${bamboo.confluence_password}' \ + -b '${bamboo.confluence_url}' \ + --ci --changes-only \ + --title-from-h1 \ + --space ${bamboo.confluence_space} \ + --parents 'IT Technology Platform/Team Devcolla'\''s/Application Stack/Harvester Cloud Native Virtualization' \ + --dry-run \ + -f './docs/*.md' || exit 1" + +other: + clean-working-dir: true +requirements: + - system.docker.executable + - AGENT_TYPE: Linux_Base_Agent diff --git a/bamboo-specs/validate/docs.yaml b/bamboo-specs/validate/docs.yaml new file mode 100644 index 0000000..2c433c4 --- /dev/null +++ b/bamboo-specs/validate/docs.yaml @@ -0,0 +1,61 @@ +tasks: + - checkout: + force-clean-build: 'true' + - script: | + #!/bin/bash + set -x + + # Parse minor version from semantic version + MINOR_VERSION=$(echo "${bamboo.inject.gtag}" | grep -Eo "^[0-9]+\.[0-9]+") + + # Inject version numbers into documentation + sed -i "s/{{ release_version }}/${bamboo.inject.gtag}/g;s/{{ minor_version }}/${MINOR_VERSION}/g" README.md + sed -i "s/{{ release_version }}/${bamboo.inject.gtag}/g;s/{{ minor_version }}/${MINOR_VERSION}/g" docs/*.md + + # Create temporary folder + mkdir -p ./vi_certs + + # Download latest Vanderlande certificate authority certificates + curl https://pki.vanderlande.com/pki/VanderlandeRootCA.crt -o - | openssl x509 -inform DER -out ./vi_certs/VanderlandeRootCA.crt + curl https://pki.vanderlande.com/pki/VanderlandeSubordinateCA-Internal.crt -o ./vi_certs/VanderlandeSubordinateCA-Internal.crt + + # Update README markdown file + docker run --rm --name confluence-docs-update \ + -v "${bamboo.hostvolume}:/code" \ + -v "${bamboo.hostvolume}/vi_certs:/usr/local/share/ca-certificates" \ + -w /code \ + "${bamboo.container_mark}" \ + /bin/bash -c "\ + update-ca-certificates && \ + mark \ + -u '${bamboo.confluence_username}' \ + -p '${bamboo.confluence_password}' \ + -b '${bamboo.confluence_url}' \ + --title-from-h1 \ + --space ${bamboo.confluence_space} \ + --parents 'IT Technology Platform/Team Devcolla'\''s/Application Stack/Harvester Cloud Native Virtualization' \ + -f './README.md' || exit 1" + + # Update all markdown files in docs/ + docker run --rm --name confluence-docs-update \ + -v "${bamboo.hostvolume}:/code" \ + -v "${bamboo.hostvolume}/vi_certs:/usr/local/share/ca-certificates" \ + -w /code \ + "${bamboo.container_mark}" \ + /bin/bash -c "\ + update-ca-certificates && \ + mark \ + -u '${bamboo.confluence_username}' \ + -p '${bamboo.confluence_password}' \ + -b '${bamboo.confluence_url}' \ + --ci --changes-only \ + --title-from-h1 \ + --space ${bamboo.confluence_space} \ + --parents 'IT Technology Platform/Team Devcolla'\''s/Application Stack/Harvester Cloud Native Virtualization' \ + -f './docs/*.md' || exit 1" + +other: + clean-working-dir: true +requirements: + - system.docker.executable + - AGENT_TYPE: Linux_Base_Agent diff --git a/deploy/harvester/cloud-config-templates/rke2-ubuntu-22.04-cloudinit-cp.yaml b/deploy/harvester/cloud-config-templates/rke2-ubuntu-22.04-cloudinit-cp.yaml new file mode 100644 index 0000000..5c35968 --- /dev/null +++ b/deploy/harvester/cloud-config-templates/rke2-ubuntu-22.04-cloudinit-cp.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +data: 
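+ # User-data below is exposed as a Harvester cloud-init template via the harvesterhci.io/cloud-init-template label set in metadata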
+ cloudInit: | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - qemu-guest-agent + - yq + - jq + + runcmd: + - sysctl -w net.ipv6.conf.all.disable_ipv6=1 + - systemctl enable --now qemu-guest-agent.service + - [sh, '/root/updates.sh'] + + disable_root: true + ssh_pwauth: false + groups: + - etcd + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Jn9gljJAbr9tjxD2$4D4O5YokrpYvYd5lznvtuWRPWWcREo325pEhn5r5vzfIU/1fX6werOG4LlXxNNBOkmbKaabekQ9NQL32IZOiH1 + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + - name: etcd + gecos: ETCD service account + lock_passwd: true + shell: /sbin/nologin + groups: [etcd] + + + write_files: + - path: /root/updates.sh + permissions: '0550' + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y + - path: /var/lib/rancher/rke2/server/manifests/disable-sa-automount.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: disable-automount-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: disable-automount-clusterrole + rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "patch"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: disable-automount-binding + subjects: + - kind: ServiceAccount + name: disable-automount-sa + namespace: kube-system + roleRef: + kind: ClusterRole + name: disable-automount-clusterrole + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: batch/v1 + kind: CronJob + metadata: + name: disable-default-sa-automount + namespace: kube-system + spec: + schedule: "0 0 * * *" + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + spec: + serviceAccountName: disable-automount-sa + containers: + - name: kubectl-patcher + image: alpine/kubectl:1.35.0 + command: + - /bin/sh + - -c + - | + for n in $(kubectl get namespaces -o=jsonpath="{.items[*]['metadata.name']}"); do + echo "Patching default SA in namespace: $n" + kubectl patch serviceaccount default -p '{"automountServiceAccountToken": false}' -n $n + done + restartPolicy: OnFailure +kind: ConfigMap +metadata: + labels: + harvesterhci.io/cloud-init-template: user + name: rke2-ubuntu-22.04-cloudinit-cp + namespace: vanderlande diff --git a/deploy/harvester/cloud-config-templates/rke2-ubuntu-22.04-cloudinit.yaml b/deploy/harvester/cloud-config-templates/rke2-ubuntu-22.04-cloudinit.yaml new file mode 100755 index 0000000..f8da424 --- /dev/null +++ b/deploy/harvester/cloud-config-templates/rke2-ubuntu-22.04-cloudinit.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +data: + cloudInit: | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - qemu-guest-agent + - yq + - jq + + runcmd: + - sysctl -w net.ipv6.conf.all.disable_ipv6=1 + - systemctl enable --now qemu-guest-agent.service + - [sh, '/root/updates.sh'] 
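+ # Hardening: the settings below disable root login and SSH password authentication; access is via the rancher user's SSH key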
+ + disable_root: true + ssh_pwauth: false + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Jn9gljJAbr9tjxD2$4D4O5YokrpYvYd5lznvtuWRPWWcREo325pEhn5r5vzfIU/1fX6werOG4LlXxNNBOkmbKaabekQ9NQL32IZOiH1 + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + + write_files: + - path: /root/updates.sh + permissions: '0550' + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y +kind: ConfigMap +metadata: + labels: + harvesterhci.io/cloud-init-template: user + name: rke2-ubuntu-22.04-cloudinit + namespace: vanderlande diff --git a/deploy/harvester/cloud-config-templates/rke2-ubuntu-24.04-cloudinit-cp.yaml b/deploy/harvester/cloud-config-templates/rke2-ubuntu-24.04-cloudinit-cp.yaml new file mode 100644 index 0000000..5243b73 --- /dev/null +++ b/deploy/harvester/cloud-config-templates/rke2-ubuntu-24.04-cloudinit-cp.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +data: + cloudInit: | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - qemu-guest-agent + - yq + - jq + + runcmd: + - sysctl -w net.ipv6.conf.all.disable_ipv6=1 + - systemctl enable --now qemu-guest-agent.service + - [sh, '/root/updates.sh'] + + disable_root: true + ssh_pwauth: false + groups: + - etcd + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Jn9gljJAbr9tjxD2$4D4O5YokrpYvYd5lznvtuWRPWWcREo325pEhn5r5vzfIU/1fX6werOG4LlXxNNBOkmbKaabekQ9NQL32IZOiH1 + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + - name: etcd + gecos: ETCD service account + lock_passwd: true + shell: /sbin/nologin + groups: [etcd] + + write_files: + - path: /root/updates.sh + permissions: '0550' + owner: root:root + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y + - path: /var/lib/rancher/rke2/server/manifests/disable-sa-automount.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: disable-automount-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: disable-automount-clusterrole + rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "patch"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: disable-automount-binding + subjects: + - kind: ServiceAccount + name: disable-automount-sa + namespace: kube-system + roleRef: + kind: ClusterRole + name: disable-automount-clusterrole + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: batch/v1 + kind: CronJob + metadata: + name: disable-default-sa-automount + namespace: kube-system + spec: + schedule: "0 0 * * *" + concurrencyPolicy: Forbid + 
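+ # The job below patches the default ServiceAccount in every namespace to disable token automount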
jobTemplate: + spec: + template: + spec: + serviceAccountName: disable-automount-sa + containers: + - name: kubectl-patcher + image: alpine/kubectl:1.35.0 + command: + - /bin/sh + - -c + - | + for n in $(kubectl get namespaces -o=jsonpath="{.items[*]['metadata.name']}"); do + echo "Patching default SA in namespace: $n" + kubectl patch serviceaccount default -p '{"automountServiceAccountToken": false}' -n $n + done + restartPolicy: OnFailure +kind: ConfigMap +metadata: + labels: + harvesterhci.io/cloud-init-template: user + name: rke2-ubuntu-24.04-cloudinit-cp + namespace: vanderlande diff --git a/deploy/harvester/cloud-config-templates/rke2-ubuntu-24.04-cloudinit.yaml b/deploy/harvester/cloud-config-templates/rke2-ubuntu-24.04-cloudinit.yaml new file mode 100755 index 0000000..971cb6b --- /dev/null +++ b/deploy/harvester/cloud-config-templates/rke2-ubuntu-24.04-cloudinit.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +data: + cloudInit: | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - qemu-guest-agent + - yq + - jq + + runcmd: + - sysctl -w net.ipv6.conf.all.disable_ipv6=1 + - systemctl enable --now qemu-guest-agent.service + - [sh, '/root/updates.sh'] + + disable_root: true + ssh_pwauth: false + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Jn9gljJAbr9tjxD2$4D4O5YokrpYvYd5lznvtuWRPWWcREo325pEhn5r5vzfIU/1fX6werOG4LlXxNNBOkmbKaabekQ9NQL32IZOiH1 + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + + write_files: + - path: /root/updates.sh + permissions: '0550' + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y +kind: ConfigMap +metadata: + labels: + harvesterhci.io/cloud-init-template: user + name: rke2-ubuntu-24.04-cloudinit + namespace: vanderlande diff --git a/deploy/harvester/image/ubuntu-22.04-2025-11-25.yaml b/deploy/harvester/image/ubuntu-22.04-2025-11-25.yaml new file mode 100755 index 0000000..112dd43 --- /dev/null +++ b/deploy/harvester/image/ubuntu-22.04-2025-11-25.yaml @@ -0,0 +1,33 @@ +apiVersion: harvesterhci.io/v1beta1 +kind: VirtualMachineImage +metadata: + annotations: + harvesterhci.io/storageClassName: harvester-longhorn + finalizers: + - wrangler.cattle.io/vm-image-controller + generateName: ubuntu-22.04- + generation: 1 + labels: + harvesterhci.io/image-type: raw_qcow2 + harvesterhci.io/imageDisplayName: ubuntu-22.04-2025-11-25 + harvesterhci.io/os-release-date: '2025-11-25' + harvesterhci.io/os-type: ubuntu + harvesterhci.io/os-version: '22.04' + name: ubuntu-22.04-7mg64 + namespace: vanderlande + uid: 894bb600-bb7d-4bd3-926f-b91616cd54be +spec: + backend: backingimage + checksum: '' + displayName: ubuntu-22.04-2025-11-25 + pvcName: '' + pvcNamespace: '' + retry: 3 + sourceType: download + storageClassParameters: + migratable: 'true' + numberOfReplicas: '3' + staleReplicaTimeout: '30' + targetStorageClassName: harvester-longhorn + url: >- + https://cloud-images.ubuntu.com/jammy/20251125/jammy-server-cloudimg-amd64.img diff --git a/deploy/harvester/image/ubuntu-24.04-2025-11-26.yaml b/deploy/harvester/image/ubuntu-24.04-2025-11-26.yaml new file mode 100755 
index 0000000..b3b5d40 --- /dev/null +++ b/deploy/harvester/image/ubuntu-24.04-2025-11-26.yaml @@ -0,0 +1,33 @@ +apiVersion: harvesterhci.io/v1beta1 +kind: VirtualMachineImage +metadata: + annotations: + harvesterhci.io/storageClassName: harvester-longhorn + finalizers: + - wrangler.cattle.io/vm-image-controller + generateName: ubuntu-24.04- + generation: 1 + labels: + harvesterhci.io/image-type: raw_qcow2 + harvesterhci.io/imageDisplayName: ubuntu-24.04-2025-11-26 + harvesterhci.io/os-release-date: '2025-11-26' + harvesterhci.io/os-type: ubuntu + harvesterhci.io/os-version: '24.04' + name: ubuntu-24.04-qhtpc + namespace: vanderlande + uid: 23b60ae3-d5bd-4b10-9587-94e56b39c018 +spec: + backend: backingimage + checksum: '' + displayName: ubuntu-24.04-2025-11-26 + pvcName: '' + pvcNamespace: '' + retry: 3 + sourceType: download + storageClassParameters: + migratable: 'true' + numberOfReplicas: '3' + staleReplicaTimeout: '30' + targetStorageClassName: harvester-longhorn + url: >- + https://cloud-images.ubuntu.com/noble/20251126/noble-server-cloudimg-amd64.img diff --git a/deploy/harvester/templates/rke2-ubuntu-22.04-8fzp2.yaml b/deploy/harvester/templates/rke2-ubuntu-22.04-8fzp2.yaml new file mode 100755 index 0000000..cd9ac88 --- /dev/null +++ b/deploy/harvester/templates/rke2-ubuntu-22.04-8fzp2.yaml @@ -0,0 +1,94 @@ +apiVersion: harvesterhci.io/v1beta1 +kind: VirtualMachineTemplateVersion +metadata: + annotations: + template-version.harvesterhci.io/customName: m8HEQq4ebp + generateName: rke2-ubuntu-22.04- + generation: 2 + labels: + template.harvesterhci.io/templateID: rke2-ubuntu-22.04 + name: rke2-ubuntu-22.04-8fzp2 + namespace: vanderlande + ownerReferences: + - apiVersion: harvesterhci.io/v1beta1 + blockOwnerDeletion: true + controller: true + kind: VirtualMachineTemplate + name: rke2-ubuntu-22.04 + # UID of the VirtualMachineTemplate to link to + uid: 8358985a-2a3d-4d06-a656-eb5e69d3137d + # UID of this VirtualMachineTemplateVersion; the secret's ownerReference points to it + uid: 0c581ffb-8681-4054-a3c1-078a22dc53d8 +spec: + templateId: vanderlande/rke2-ubuntu-22.04 + vm: + metadata: + annotations: + harvesterhci.io/enableCPUAndMemoryHotplug: 'true' + # Image StorageClass name is defined by the image suffix, i.e. 
ubuntu-22.04-7mg64 -> longhorn-image-7mg64 + harvesterhci.io/volumeClaimTemplates: '[{"metadata":{"name":"-disk-0-q0xip","annotations":{"harvesterhci.io/imageId":"vanderlande/image-7mg64"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"60Gi"}},"volumeMode":"Block","storageClassName":"longhorn-image-7mg64"}}]' + template-version.harvesterhci.io/customName: m8HEQq4ebp + creationTimestamp: null + labels: + harvesterhci.io/os: ubuntu + spec: + runStrategy: RerunOnFailure + template: + metadata: + annotations: + harvesterhci.io/sshNames: '["vanderlande/harvester-cnv-node"]' + creationTimestamp: null + spec: + affinity: {} + domain: + cpu: + cores: 1 + maxSockets: 16 + sockets: 4 + threads: 1 + devices: + disks: + - bootOrder: 1 + disk: + bus: virtio + name: disk-0 + - disk: + bus: virtio + name: cloudinitdisk + inputs: + - bus: usb + name: tablet + type: tablet + interfaces: + - bridge: {} + model: virtio + name: default + features: + acpi: + enabled: true + machine: + type: '' + memory: + guest: 8Gi + maxGuest: 32Gi + resources: + limits: + cpu: '16' + memory: 32Gi + evictionStrategy: LiveMigrateIfPossible + networks: + - multus: + networkName: vanderlande/vm-lan + name: default + terminationGracePeriodSeconds: 120 + volumes: + - name: disk-0 + persistentVolumeClaim: + claimName: '-disk-0-q0xip' + - cloudInitNoCloud: + networkDataSecretRef: + name: rke2-ubuntu-22.04-lbbfn + secretRef: + name: rke2-ubuntu-22.04-lbbfn + name: cloudinitdisk + diff --git a/deploy/harvester/templates/rke2-ubuntu-22.04-secret-lbbfn.yaml b/deploy/harvester/templates/rke2-ubuntu-22.04-secret-lbbfn.yaml new file mode 100755 index 0000000..0a10d39 --- /dev/null +++ b/deploy/harvester/templates/rke2-ubuntu-22.04-secret-lbbfn.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +data: + # Updated user data should be imported from rke2-ubuntu-22.04-cloudinit and base64 encoded + networkdata: "" + userdata: 
I2Nsb3VkLWNvbmZpZwpwYWNrYWdlX3VwZGF0ZTogZmFsc2UKcGFja2FnZV91cGdyYWRlOiBmYWxzZQpzbmFwOgogIGNvbW1hbmRzOgogICAgMDogc25hcCByZWZyZXNoIC0taG9sZD1mb3JldmVyCnBhY2thZ2VfcmVib290X2lmX3JlcXVpcmVkOiB0cnVlCnBhY2thZ2VzOgogIC0gcWVtdS1ndWVzdC1hZ2VudAogIC0geXEKICAtIGpxCgpydW5jbWQ6CiAgLSBzeXNjdGwgLXcgbmV0LmlwdjYuY29uZi5hbGwuZGlzYWJsZV9pcHY2PTEKICAtIHN5c3RlbWN0bCBlbmFibGUgLS1ub3cgcWVtdS1ndWVzdC1hZ2VudC5zZXJ2aWNlCiAgLSAtIHNoCiAgICAtIC9yb290L3VwZGF0ZXMuc2gKCmRpc2FibGVfcm9vdDogdHJ1ZQpzc2hfcHdhdXRoOiBmYWxzZQp1c2VyczoKICAtIG5hbWU6IHJhbmNoZXIKICAgIGdlY29zOiBSYW5jaGVyIHNlcnZpY2UgYWNjb3VudAogICAgaGFzaGVkX3Bhc3N3ZDogJDYkSm45Z2xqSkFicjl0anhEMiQ0RDRPNVlva3JwWXZZZDVsem52dHVXUlBXV2NSRW8zMjVwRWhuNXI1dnpmSVUvMWZYNndlck9HNExsWHhOTkJPa21iS2FhYmVrUTlOUUwzMklaT2lIMQogICAgbG9ja19wYXNzd2Q6IGZhbHNlCiAgICBzaGVsbDogL2Jpbi9iYXNoCiAgICBncm91cHM6IFsgdXNlcnMsIHN1ZG8sIGRvY2tlciBdCiAgICBzdWRvOiBBTEw9KEFMTCkKICAgIHNzaF9hdXRob3JpemVkX2tleXM6CiAgICAgIC0gJ3NzaC1lZDI1NTE5CiAgICAgICAgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSUV3V25uT1RBdTBMbEFaUmN6UTBaMEt2TmxVZFBoR1FocFppZStuRjFPM3MnCgp3cml0ZV9maWxlczoKICAtIHBhdGg6IC9yb290L3VwZGF0ZXMuc2gKICAgIHBlcm1pc3Npb25zOiAnMDU1MCcKICAgIGNvbnRlbnQ6IHwKICAgICAgIyEvYmluL2Jhc2gKICAgICAgZXhwb3J0IERFQklBTl9GUk9OVEVORD1ub25pbnRlcmFjdGl2ZQogICAgICBhcHQtbWFyayBob2xkIGxpbnV4LWhlYWRlcnMtZ2VuZXJpYwogICAgICBhcHQtbWFyayBob2xkIGxpbnV4LWhlYWRlcnMtdmlydHVhbAogICAgICBhcHQtbWFyayBob2xkIGxpbnV4LWltYWdlLXZpcnR1YWwKICAgICAgYXB0LW1hcmsgaG9sZCBsaW51eC12aXJ0dWFsCiAgICAgIGFwdC1nZXQgdXBkYXRlCiAgICAgIGFwdC1nZXQgdXBncmFkZSAteQogICAgICBhcHQtZ2V0IGF1dG9yZW1vdmUgLXkgICAgCnNzaF9hdXRob3JpemVkX2tleXM6CiAgLSBzc2gtZWQyNTUxOQogICAgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSUV3V25uT1RBdTBMbEFaUmN6UTBaMEt2TmxVZFBoR1FocFppZStuRjFPM3MKICAgIEhhcnZlc3RlciBDTlYgTm9kZQo= +kind: Secret +metadata: + labels: + harvesterhci.io/cloud-init-template: harvester + name: rke2-ubuntu-22.04-lbbfn + namespace: vanderlande + ownerReferences: + - apiVersion: harvesterhci.io/v1beta1 + kind: VirtualMachineTemplateVersion + name: rke2-ubuntu-22.04-8fzp2 + # UID of the VirtualMachineTemplateVersion to link to + uid: 0c581ffb-8681-4054-a3c1-078a22dc53d8 +type: secret diff --git a/deploy/harvester/templates/rke2-ubuntu-22.04-template.yaml b/deploy/harvester/templates/rke2-ubuntu-22.04-template.yaml new file mode 100755 index 0000000..ea88dcf --- /dev/null +++ b/deploy/harvester/templates/rke2-ubuntu-22.04-template.yaml @@ -0,0 +1,10 @@ +apiVersion: harvesterhci.io/v1beta1 +kind: VirtualMachineTemplate +metadata: + name: rke2-ubuntu-22.04 + namespace: vanderlande + # UID needs to be specified explicitly as it is used in template version. 
+ uid: 8358985a-2a3d-4d06-a656-eb5e69d3137d +spec: + defaultVersionId: vanderlande/rke2-ubuntu-22.04-8fzp2 + diff --git a/deploy/harvester/templates/rke2-ubuntu-24.04-secret-3bl5k.yaml b/deploy/harvester/templates/rke2-ubuntu-24.04-secret-3bl5k.yaml new file mode 100755 index 0000000..89f7d1d --- /dev/null +++ b/deploy/harvester/templates/rke2-ubuntu-24.04-secret-3bl5k.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +data: + networkdata: "" + # Updated user data should be imported from rke2-ubuntu-24.04-cloudinit and base64 encoded + userdata: I2Nsb3VkLWNvbmZpZwpwYWNrYWdlX3VwZGF0ZTogdHJ1ZQpwYWNrYWdlX3VwZ3JhZGU6IGZhbHNlCnNuYXA6CiAgY29tbWFuZHM6CiAgICAwOiBzbmFwIHJlZnJlc2ggLS1ob2xkPWZvcmV2ZXIKcGFja2FnZV9yZWJvb3RfaWZfcmVxdWlyZWQ6IHRydWUKcGFja2FnZXM6CiAgLSBxZW11LWd1ZXN0LWFnZW50CiAgLSB5cQogIC0ganEKCnJ1bmNtZDoKICAtIHN5c2N0bCAtdyBuZXQuaXB2Ni5jb25mLmFsbC5kaXNhYmxlX2lwdjY9MQogIC0gc3lzdGVtY3RsIGVuYWJsZSAtLW5vdyBxZW11LWd1ZXN0LWFnZW50LnNlcnZpY2UKICAtIC0gc2gKICAgIC0gL3Jvb3QvdXBkYXRlcy5zaAoKZGlzYWJsZV9yb290OiB0cnVlCnNzaF9wd2F1dGg6IGZhbHNlCnVzZXJzOgogIC0gbmFtZTogcmFuY2hlcgogICAgZ2Vjb3M6IFJhbmNoZXIgc2VydmljZSBhY2NvdW50CiAgICBoYXNoZWRfcGFzc3dkOiAkNiRKbjlnbGpKQWJyOXRqeEQyJDRENE81WW9rcnBZdllkNWx6bnZ0dVdSUFdXY1JFbzMyNXBFaG41cjV2emZJVS8xZlg2d2VyT0c0TGxYeE5OQk9rbWJLYWFiZWtROU5RTDMySVpPaUgxCiAgICBsb2NrX3Bhc3N3ZDogZmFsc2UKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGdyb3VwczogWyB1c2Vycywgc3VkbywgZG9ja2VyIF0KICAgIHN1ZG86IEFMTD0oQUxMOkFMTCkgQUxMCiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtICdzc2gtZWQyNTUxOQogICAgICAgIEFBQUFDM056YUMxbFpESTFOVEU1QUFBQUlFd1dubk9UQXUwTGxBWlJjelEwWjBLdk5sVWRQaEdRaHBaaWUrbkYxTzNzJwoKd3JpdGVfZmlsZXM6CiAgLSBwYXRoOiAvcm9vdC91cGRhdGVzLnNoCiAgICBwZXJtaXNzaW9uczogJzA1NTAnCiAgICBjb250ZW50OiB8CiAgICAgICMhL2Jpbi9iYXNoCiAgICAgIGV4cG9ydCBERUJJQU5fRlJPTlRFTkQ9bm9uaW50ZXJhY3RpdmUKICAgICAgYXB0LW1hcmsgaG9sZCBsaW51eC1oZWFkZXJzLWdlbmVyaWMKICAgICAgYXB0LW1hcmsgaG9sZCBsaW51eC1oZWFkZXJzLXZpcnR1YWwKICAgICAgYXB0LW1hcmsgaG9sZCBsaW51eC1pbWFnZS12aXJ0dWFsCiAgICAgIGFwdC1tYXJrIGhvbGQgbGludXgtdmlydHVhbAogICAgICBhcHQtZ2V0IHVwZGF0ZQogICAgICBhcHQtZ2V0IHVwZ3JhZGUgLXkKICAgICAgYXB0LWdldCBhdXRvcmVtb3ZlIC15ICAgIApzc2hfYXV0aG9yaXplZF9rZXlzOgogIC0gc3NoLWVkMjU1MTkKICAgIEFBQUFDM056YUMxbFpESTFOVEU1QUFBQUlFd1dubk9UQXUwTGxBWlJjelEwWjBLdk5sVWRQaEdRaHBaaWUrbkYxTzNzCiAgICBIYXJ2ZXN0ZXIgQ05WIE5vZGUK +kind: Secret +metadata: + labels: + harvesterhci.io/cloud-init-template: harvester + name: rke2-ubuntu-24.04-3bl5k + namespace: vanderlande + ownerReferences: + - apiVersion: harvesterhci.io/v1beta1 + kind: VirtualMachineTemplateVersion + name: rke2-ubuntu-24.04-xrv5n + # UID of the VirtualMachineTemplateVersion to link to + uid: ad96ea4b-3d5a-4de3-adb0-0eb3c99920b2 +type: secret diff --git a/deploy/harvester/templates/rke2-ubuntu-24.04-template.yaml b/deploy/harvester/templates/rke2-ubuntu-24.04-template.yaml new file mode 100755 index 0000000..b66a6e6 --- /dev/null +++ b/deploy/harvester/templates/rke2-ubuntu-24.04-template.yaml @@ -0,0 +1,10 @@ +apiVersion: harvesterhci.io/v1beta1 +kind: VirtualMachineTemplate +metadata: + name: rke2-ubuntu-24.04 + namespace: vanderlande + # UID needs to be specified explicitly as it is used in template version and secret. 
uid: cf644217-0be1-47f0-8c7f-2594f633da26 +spec: + defaultVersionId: vanderlande/rke2-ubuntu-24.04-xrv5n + diff --git a/deploy/harvester/templates/rke2-ubuntu-24.04-xrv5n.yaml b/deploy/harvester/templates/rke2-ubuntu-24.04-xrv5n.yaml new file mode 100755 index 0000000..06425ce --- /dev/null +++ b/deploy/harvester/templates/rke2-ubuntu-24.04-xrv5n.yaml @@ -0,0 +1,94 @@ +apiVersion: harvesterhci.io/v1beta1 +kind: VirtualMachineTemplateVersion +metadata: + annotations: + template-version.harvesterhci.io/customName: VfNPzXKspc + generateName: rke2-ubuntu-24.04- + generation: 2 + labels: + template.harvesterhci.io/templateID: rke2-ubuntu-24.04 + name: rke2-ubuntu-24.04-xrv5n + namespace: vanderlande + ownerReferences: + - apiVersion: harvesterhci.io/v1beta1 + blockOwnerDeletion: true + controller: true + kind: VirtualMachineTemplate + name: rke2-ubuntu-24.04 + # UID of the VirtualMachineTemplate to link to + uid: cf644217-0be1-47f0-8c7f-2594f633da26 + # UID of this VirtualMachineTemplateVersion; the secret's ownerReference points to it + uid: ad96ea4b-3d5a-4de3-adb0-0eb3c99920b2 +spec: + templateId: vanderlande/rke2-ubuntu-24.04 + vm: + metadata: + annotations: + harvesterhci.io/enableCPUAndMemoryHotplug: "true" + # Image StorageClass name is defined by the image suffix, i.e. ubuntu-24.04-qhtpc -> longhorn-image-qhtpc + harvesterhci.io/volumeClaimTemplates: '[{"metadata":{"name":"-disk-0-jprp0","annotations":{"harvesterhci.io/imageId":"vanderlande/image-qhtpc"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"60Gi"}},"volumeMode":"Block","storageClassName":"longhorn-image-qhtpc"}}]' + template-version.harvesterhci.io/customName: VfNPzXKspc + creationTimestamp: null + labels: + harvesterhci.io/os: ubuntu + spec: + runStrategy: RerunOnFailure + template: + metadata: + annotations: + harvesterhci.io/sshNames: '["vanderlande/harvester-cnv-node"]' + creationTimestamp: null + spec: + affinity: {} + domain: + cpu: + cores: 1 + maxSockets: 16 + sockets: 4 + threads: 1 + devices: + disks: + - bootOrder: 1 + disk: + bus: virtio + name: disk-0 + - disk: + bus: virtio + name: cloudinitdisk + inputs: + - bus: usb + name: tablet + type: tablet + interfaces: + - bridge: {} + model: virtio + name: default + features: + acpi: + enabled: true + machine: + type: "" + memory: + guest: 8Gi + maxGuest: 32Gi + resources: + limits: + cpu: "16" + memory: 32Gi + evictionStrategy: LiveMigrateIfPossible + networks: + - multus: + networkName: vanderlande/vm-lan + name: default + terminationGracePeriodSeconds: 120 + volumes: + - name: disk-0 + persistentVolumeClaim: + claimName: -disk-0-jprp0 + - cloudInitNoCloud: + networkDataSecretRef: + name: rke2-ubuntu-24.04-3bl5k + secretRef: + name: rke2-ubuntu-24.04-3bl5k + name: cloudinitdisk + diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/.helmignore b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/.helmignore new file mode 100644 index 0000000..5df8cd4 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/.helmignore @@ -0,0 +1,21 @@ +# HELM IGNORE OPTIONS: +# Patterns to ignore when building Helm packages. +# Supports shell glob matching, relative path matching, and negation (prefixed with !) 
+ +.DS_Store +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +*.swp +*.bak +*.tmp +*.orig +*~ +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/Chart.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/Chart.yaml new file mode 100644 index 0000000..e7f32c9 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/Chart.yaml @@ -0,0 +1,22 @@ +apiVersion: v2 +name: rancher-cluster-templates +version: 0.7.2 +appVersion: 0.7.2 + +type: application +description: Hardened Rancher Cluster Templates by Rancher Government +icon: https://raw.githubusercontent.com/rancherfederal/carbide-docs/main/static/img/carbide-logo.svg + +home: https://github.com/rancherfederal +sources: + - https://github.com/rancherfederal/rancher-cluster-templates + +maintainers: + - name: Rancher Government + email: support@ranchergovernment.com + url: https://ranchergovernment.com + +annotations: + catalog.cattle.io/type: cluster-template + catalog.cattle.io/namespace: fleet-default + classification: UNCLASSIFIED diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/app-readme.md b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/app-readme.md new file mode 100644 index 0000000..9040bd7 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/app-readme.md @@ -0,0 +1,105 @@ +# Rancher Cluster Templates Helm Chart + +| Type | Chart Version | App Version | +| :---------: | :-----------: | :---------: | +| application | `0.7.2` | `0.7.2` | + +⚠️ This project is still in active development. As we continue to develop it, there will be breaking changes. ⚠️ + +## Supported Providers + +### Currently Available + +- AWS Commercial +- AWS GovCloud +- Harvester +- Digital Ocean +- VMware vSphere +- Custom + +### Pending Validation + +- Microsoft Azure + +## Installing the Chart + +### Helm Install via Repository + +```bash +helm repo add cluster-templates https://rancherfederal.github.io/rancher-cluster-templates +helm upgrade -i cluster cluster-templates/rancher-cluster-templates -n fleet-default -f values.yaml +``` + +### Helm Install via Registry +```bash +helm upgrade -i cluster oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates -n fleet-default -f values.yaml +``` + +## Helm Chart Deployment Status + +```bash +helm status cluster -n fleet-default +``` + +## Uninstalling the Chart + +```bash +helm delete cluster -n fleet-default +``` + +## Chart/Cluster Secrets Management + +### Cloud Credentials + +If you do not have Cloud Credentials already created within the Rancher Manager, you can create them via `kubectl` with the command(s) below. Eventually, we will be moving these options into the Helm chart! 
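+
+Once any of the secrets below exist, a quick sanity check that the driver annotation landed (the secret name here matches the AWS example that follows) is:
+
+```bash
+kubectl get secret aws-creds -n cattle-global-data -o yaml | grep provisioning.cattle.io/driver
+```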
+ +#### For AWS Credentials + +```bash +# with long-term credentials (accessKey and secretKey) +kubectl create secret -n cattle-global-data generic aws-creds --from-literal=amazonec2credentialConfig-defaultRegion=$REGION --from-literal=amazonec2credentialConfig-accessKey=$ACCESSKEY --from-literal=amazonec2credentialConfig-secretKey=$SECRETKEY + +kubectl annotate secret -n cattle-global-data aws-creds provisioning.cattle.io/driver=aws +``` + +```bash +# with temporary credentials (accessKey, secretKey, sessionToken) +kubectl create secret -n cattle-global-data generic aws-creds --from-literal=amazonec2credentialConfig-defaultRegion=$REGION --from-literal=amazonec2credentialConfig-accessKey=$ACCESSKEY --from-literal=amazonec2credentialConfig-secretKey=$SECRETKEY --from-literal=amazonec2credentialConfig-sessionToken=$SESSIONTOKEN + +kubectl annotate secret -n cattle-global-data aws-creds provisioning.cattle.io/driver=aws +``` + +#### For Harvester Credentials + +```bash +export CLUSTERID=$(kubectl get clusters.management.cattle.io -o=jsonpath='{range .items[?(@.metadata.labels.provider\.cattle\.io=="harvester")]}{.metadata.name}{"\n"}{end}') + +kubectl create secret -n cattle-global-data generic harvester-creds --from-literal=harvestercredentialConfig-clusterId=$CLUSTERID --from-literal=harvestercredentialConfig-clusterType=imported --from-file=harvestercredentialConfig-kubeconfigContent=harvester.yaml + +kubectl annotate secret -n cattle-global-data harvester-creds provisioning.cattle.io/driver=harvester +``` + +#### For Digital Ocean Credentials + +```bash +kubectl create secret -n cattle-global-data generic digitalocean-creds --from-literal=digitaloceancredentialConfig-accessToken=$TOKEN + +kubectl annotate secret -n cattle-global-data digitalocean-creds provisioning.cattle.io/driver=digitalocean +``` + + +#### For VMware vSphere Credentials + +```bash +# vSphere credential field names are assumptions based on the Rancher vmwarevsphere driver; verify the exact keys for your Rancher version +kubectl create secret -n cattle-global-data generic vsphere-creds --from-literal=vmwarevspherecredentialConfig-vcenter=$VCENTER --from-literal=vmwarevspherecredentialConfig-username=$USERNAME --from-literal=vmwarevspherecredentialConfig-password=$PASSWORD + +kubectl annotate secret -n cattle-global-data vsphere-creds provisioning.cattle.io/driver=vmwarevsphere +``` + +### Registry Credentials + +If you are configuring an authenticated registry and do not have Registry Credentials created in the Rancher Manager, you can create them via `kubectl` with the command below: + +```bash +kubectl create secret -n fleet-default generic --type kubernetes.io/basic-auth registry-creds --from-literal=username=USERNAME --from-literal=password=PASSWORD +``` diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/questions.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/questions.yaml new file mode 100644 index 0000000..3cf6b5a --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/questions.yaml @@ -0,0 +1,561 @@ +# questions: +# - variable: cluster.name +# default: mycluster +# description: 'Specify the name of the cluster' +# label: 'Cluster Name' +# required: true +# type: string +# group: 'General' +# - variable: cloudCredentialSecretName +# default: +# description: 'CloudCredentialName for provisioning cluster' +# label: 'CloudCredential Name' +# type: cloudcredential +# group: 'General' +# - variable: cloudprovider +# default: custom +# description: 'Specify Infrastructure provider for underlying nodes' +# label: 'Infrastructure Provider' +# type: enum +# required: true +# options: +# - amazonec2 +# - azure +# - digitalocean +# - elemental +# - harvester +# - vsphere +# - custom +# group: 
'General' +# - variable: kubernetesVersion +# default: v1.31.5+rke2r1 +# description: 'Specify Kubernetes Version' +# label: 'Kubernetes Version' +# type: enum +# required: true +# options: +# - v1.31.5+rke2r1 +# - v1.30.9+rke2r1 +# - v1.29.13+rke2r1 +# group: 'General' +# - variable: localClusterAuthEndpoint.enabled +# default: false +# label: 'Local Auth Access Endpoint' +# description: 'Enable Local Auth Access Endpoint' +# type: boolean +# group: 'Auth Access Endpoint' +# show_subquestion_if: true +# subquestions: +# - variable: localClusterAuthEndpoint.fqdn +# default: +# description: 'Local Auth Access Endpoint FQDN' +# label: 'Auth Endpoint FQDN' +# type: hostname +# group: 'Auth Access Endpoint' +# - variable: localClusterAuthEndpoint.caCerts +# default: +# label: 'Auth Endpoint Cacerts' +# description: 'Local Auth Access Endpoint CACerts' +# type: multiline +# group: 'Auth Access Endpoint' +# - variable: addons.monitoring.enabled +# default: false +# label: 'Enable Monitoring' +# description: 'Enable Rancher Monitoring' +# type: boolean +# group: 'Monitoring' +# show_subquestion_if: true +# subquestions: +# - variable: monitoring.version +# default: +# label: 'Monitoring Version' +# description: 'Choose chart version of monitoring. If empty latest version will be installed' +# type: string +# group: 'Monitoring' +# - variable: monitoring.values +# default: +# label: 'Monitoring Values' +# description: 'Custom monitoring chart values' +# type: multiline +# group: 'Monitoring' +# - variable: nodepools.0.name +# default: +# description: 'Specify nodepool name' +# type: string +# label: 'Nodepool name' +# required: true +# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental +# group: 'Nodepools' +# - variable: nodepools.0.quantity +# default: 1 +# description: 'Specify node count' +# type: int +# required: true +# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental +# label: 'Node count' +# group: 'Nodepools' +# - variable: nodepools.0.etcd +# default: true +# label: etcd +# type: boolean +# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental +# group: 'Nodepools' +# - variable: nodepools.0.worker +# default: true +# label: worker +# type: boolean +# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental +# group: 'Nodepools' +# - variable: nodepools.0.controlplane +# label: controlplane +# default: true +# type: boolean +# show_if: cloudprovider=amazonec2 || cloudprovider=vsphere || cloudprovider=azure || cloudprovider=digitalocean || cloudprovider=harvester || cloudprovider=elemental +# group: 'Nodepools' +# # amazonec2 +# - variable: nodepools.0.region +# label: 'Region' +# default: us-east-1 +# type: string +# description: 'AWS EC2 Region' +# required: true +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# - variable: nodepools.0.zone +# label: 'Zone' +# default: a +# type: string +# description: 'AWS EC2 Zone' +# required: true +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# - variable: nodepools.0.instanceType +# label: 'Instance Type' +# default: t3a.medium +# type: string +# description: 'AWS instance type' +# 
required: true +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# - variable: nodepools.0.rootSize +# label: 'Root Disk Size' +# default: 16g +# type: string +# description: 'AWS EC2 root disk size' +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# - variable: nodepools.0.vpcId +# label: 'VPC/SUBNET' +# default: '' +# type: string +# description: 'AWS EC2 vpc ID' +# required: true +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# - variable: nodepools.0.iamInstanceProfile +# label: 'Instance Profile Name' +# default: '' +# type: string +# description: 'AWS EC2 Instance Profile Name' +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# - variable: nodepools.0.ami +# label: 'AMI ID' +# default: '' +# type: string +# description: 'AWS EC2 AMI ID' +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# - variable: nodepools.0.sshUser +# label: 'SSH Username for AMI' +# default: ubuntu +# type: string +# description: 'AWS EC2 SSH Username for AMI' +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# - variable: nodepools.0.createSecurityGroup +# label: 'Create security group' +# default: true +# type: boolean +# description: 'Whether to create `rancher-node` security group. If false, can provide with existing security group' +# show_if: cloudprovider=amazonec2 +# group: 'Nodepools' +# show_subquestion_if: false +# subquestions: +# - variable: nodepools.0.securityGroups +# label: 'Security groups' +# default: +# type: string +# description: 'Using existing security groups' +# group: 'Nodepools' +# # vsphere +# - variable: nodepools.0.vcenter +# label: 'vSphere IP/hostname' +# default: '' +# type: hostname +# description: 'vSphere IP/hostname for vCenter' +# required: true +# show_if: cloudprovider=vsphere +# group: 'Nodepools' +# - variable: nodepools.0.datacenter +# label: 'Vsphere Datacenter' +# default: '' +# type: hostname +# description: 'vSphere datacenter for virtual machine' +# required: true +# show_if: cloudprovider=vsphere +# group: 'Nodepools' +# - variable: nodepools.0.datastore +# label: 'Vsphere Datastore' +# default: '' +# type: string +# description: 'vSphere datastore for virtual machine' +# required: true +# show_if: cloudprovider=vsphere +# group: 'Nodepools' +# - variable: nodepools.0.datastoreCluster +# label: 'Vsphere DatastoreCluster' +# default: '' +# type: string +# description: 'vSphere datastore cluster for virtual machine' +# required: true +# show_if: cloudprovider=vsphere +# group: 'Nodepools' +# - variable: nodepools.0.diskSize +# label: 'Disk Size' +# default: '20480' +# type: string +# description: 'vSphere size of disk for docker VM (in MB)' +# show_if: cloudprovider=vsphere +# group: 'Nodepools' +# - variable: nodepools.0.memorySize +# label: 'Memory Size' +# default: '2048' +# type: string +# description: 'vSphere size of memory for docker VM (in MB)' +# show_if: cloudprovider=vsphere +# group: 'Nodepools' +# - variable: nodepools.0.network +# label: 'Network' +# default: '' +# type: string +# description: 'vSphere network where the virtual machine will be attached' +# show_if: cloudprovider=vsphere +# group: 'Nodepools' +# - variable: nodepools.0.pool +# label: 'Resource Pool' +# default: '' +# type: string +# description: 'vSphere resource pool for docker VM' +# show_if: cloudprovider=vsphere +# group: 'Nodepools' +# - variable: nodepools.0.sshPort +# label: 'SSH Port' +# default: '22' +# type: string +# description: 'If using a non-B2D image you can specify the ssh port' +# show_if: cloudprovider=vsphere 
+# group: 'Nodepools'
+# - variable: nodepools.0.sshUserGroup
+# label: 'SSH User Group'
+# default: docker:staff
+# type: string
+# description: "If using a non-B2D image the uploaded keys will need chown'ed, defaults to staff e.g. docker:staff"
+# show_if: cloudprovider=vsphere
+# group: 'Nodepools'
+# - variable: nodepools.0.vappIpallocationpolicy
+# label: 'IP allocation policy'
+# default: ''
+# type: enum
+# options:
+# - dhcp
+# - fixed
+# - transient
+# - fixedAllocated
+# description: 'vSphere vApp IP allocation policy. Supported values are: dhcp, fixed, transient and fixedAllocated'
+# show_if: cloudprovider=vsphere
+# group: 'Nodepools'
+# - variable: nodepools.0.vappIpprotocol
+# label: 'IP protocol'
+# default: ''
+# type: enum
+# options:
+# - IPv4
+# - IPv6
+# description: 'vSphere vApp IP protocol for this deployment. Supported values are: IPv4 and IPv6'
+# show_if: cloudprovider=vsphere
+# group: 'Nodepools'
+# # harvester
+# - variable: nodepools.0.diskSize
+# label: 'Disk Size'
+# default: 40
+# type: string
+# description: 'Size of virtual hard disk in GB'
+# show_if: cloudprovider=harvester
+# group: 'Nodepools'
+# - variable: nodepools.0.diskBus
+# label: 'Disk Bus Type'
+# default: virtio
+# type: string
+# description: 'Harvester disk bus type'
+# show_if: cloudprovider=harvester
+# group: 'Nodepools'
+# - variable: nodepools.0.cpuCount
+# label: 'CPUs'
+# default: 2
+# type: string
+# description: 'Number of CPUs for your VM'
+# show_if: cloudprovider=harvester
+# group: 'Nodepools'
+# - variable: nodepools.0.memorySize
+# label: 'Memory Size'
+# default: 4
+# type: string
+# description: 'Memory for VM in GB (available RAM)'
+# show_if: cloudprovider=harvester
+# group: 'Nodepools'
+# - variable: nodepools.0.networkName
+# label: 'Network'
+# default: default/network-name-1
+# type: string
+# description: 'Name of vlan network in Harvester'
+# show_if: cloudprovider=harvester
+# group: 'Nodepools'
+# - variable: nodepools.0.imageName
+# label: 'Name of Image'
+# default: default/image-rand
+# type: string
+# description: 'Name of image in Harvester'
+# show_if: cloudprovider=harvester
+# group: 'Nodepools'
+# - variable: nodepools.0.vmNamespace
+# label: 'vm Namespace'
+# default: default
+# type: string
+# description: 'Namespace to deploy the VM to'
+# show_if: cloudprovider=harvester
+# group: 'Nodepools'
+# - variable: nodepools.0.sshUser
+# label: 'SSH User'
+# default: ubuntu
+# type: string
+# description: 'SSH username'
+# show_if: cloudprovider=harvester
+# group: 'Nodepools'
+# # digitalocean
+# - variable: nodepools.0.image
+# label: 'Image'
+# default: ubuntu-20-04-x64
+# type: string
+# description: 'Digital Ocean Image'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.backups
+# label: 'Backup'
+# default: false
+# type: boolean
+# description: 'Enable backups for droplet'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.ipv6
+# label: 'IPv6'
+# default: false
+# type: boolean
+# description: 'Enable ipv6 for droplet'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.monitoring
+# label: 'Monitoring'
+# default: false
+# type: boolean
+# description: 'Enable monitoring for droplet'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.privateNetworking
+# label: 'Private Networking'
+# default: false
+# type: boolean
+# description: 'Enable private networking for droplet'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.region
+# label: 'Region'
+# default: sfo3
+# type: string
+# description: 'Digital Ocean region'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.size
+# label: 'Size'
+# default: s-4vcpu-8gb
+# type: string
+# description: 'Digital Ocean size'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.userdata
+# label: 'Userdata'
+# default:
+# type: multiline
+# description: 'File contents for userdata'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.sshPort
+# label: 'SSH Port'
+# default: 22
+# type: string
+# description: 'SSH port'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# - variable: nodepools.0.sshUser
+# label: 'SSH User'
+# default: root
+# type: string
+# description: 'SSH username'
+# show_if: cloudprovider=digitalocean
+# group: 'Nodepools'
+# # azure
+# - variable: nodepools.0.availabilitySet
+# label: 'Availability Set'
+# default: docker-machine
+# type: string
+# description: 'Azure Availability Set to place the virtual machine into'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.diskSize
+# label: 'Disk Size'
+# default: ''
+# type: string
+# description: 'Disk size if using managed disk (GiB)'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.dns
+# label: 'DNS'
+# default: ''
+# type: string
+# description: 'A unique DNS label for the public IP address'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.environment
+# label: 'Environment'
+# default: AzurePublicCloud
+# type: enum
+# options:
+# - AzurePublicCloud
+# - AzureGermanCloud
+# - AzureChinaCloud
+# - AzureUSGovernmentCloud
+# description: 'Azure environment'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.faultDomainCount
+# label: 'Fault Domain Count'
+# default: ''
+# type: string
+# description: 'Fault domain count to use for availability set'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.image
+# label: 'Image'
+# default: canonical:UbuntuServer:18.04-LTS:latest
+# type: string
+# description: 'Azure virtual machine OS image'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.location
+# label: 'Location'
+# default: westus
+# type: string
+# description: 'Azure region to create the virtual machine'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.managedDisks
+# label: 'Managed Disks'
+# default: false
+# type: boolean
+# description: 'Configures VM and availability set for managed disks'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.noPublicIp
+# label: 'No Public IP'
+# default: false
+# type: boolean
+# description: 'Do not create a public IP address for the machine'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.privateIpAddress
+# label: 'Private IP Address'
+# default: ''
+# type: string
+# description: 'Specify a static private IP address for the machine'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.resourceGroup
+# label: 'Resource Group'
+# default: docker-machine
+# type: string
+# description: 'Azure Resource Group name (will be created if missing)'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.size
+# label: 'Size'
+# default: 'Standard_D2_v2'
+# type: string
+# description: 'Size for Azure Virtual Machine'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.sshUser
+# label: 'SSH Username'
+# default: docker-user
+# type: string
+# description: 'Username for SSH login'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.staticPublicIp
+# label: 'Static Public IP'
+# default: false
+# type: boolean
+# description: 'Assign a static public IP address to the machine'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.storageType
+# label: 'Storage Account'
+# default: 'Standard_LRS'
+# type: string
+# description: 'Type of Storage Account to host the OS Disk for the machine'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.subnet
+# label: 'Subnet'
+# default: docker-machine
+# type: string
+# description: 'Azure Subnet Name to be used within the Virtual Network'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.subnetPrefix
+# label: 'Subnet Prefix'
+# default: '192.168.0.0/16'
+# type: string
+# description: 'Private CIDR block to be used for the new subnet, should comply with RFC 1918'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.updateDomainCount
+# label: 'Update Domain Count'
+# default: ''
+# type: string
+# description: 'Update domain count to use for availability set'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.usePrivateIp
+# label: 'Use Private IP'
+# default: false
+# type: boolean
+# description: 'Use the private IP address of the machine to connect'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
+# - variable: nodepools.0.vnet
+# label: 'Vnet'
+# default: 'docker-machine-vnet'
+# type: string
+# description: 'Azure Virtual Network name to connect the virtual machine (in [resourcegroup:]name format)'
+# show_if: cloudprovider=azure
+# group: 'Nodepools'
diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/NOTES.txt b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/NOTES.txt
new file mode 100644
index 0000000..39bdbda
--- /dev/null
+++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/NOTES.txt
@@ -0,0 +1,6 @@
+
+Congratulations! You've successfully deployed a cluster using the Helm Chart for Rancher Cluster Templates by Rancher Government. Please be patient while the cluster provisions and deploys on your infrastructure.
+
+View the Cluster -> https://{{ .Values.rancher.cattle.url | default "" }}/dashboard/c/_/manager/provisioning.cattle.io.cluster/fleet-default/{{ .Values.cluster.name }}
+
+View the Docs -> https://github.com/rancherfederal/rancher-cluster-templates
diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/_helpers.tpl b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/_helpers.tpl
new file mode 100644
index 0000000..a6bc23e
--- /dev/null
+++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "rancher-cluster-templates.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "rancher-cluster-templates.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "rancher-cluster-templates.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "rancher-cluster-templates.labels" -}} +helm.sh/chart: {{ include "rancher-cluster-templates.chart" . }} +{{ include "rancher-cluster-templates.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "rancher-cluster-templates.selectorLabels" -}} +app.kubernetes.io/name: {{ include "rancher-cluster-templates.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "rancher-cluster-templates.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "rancher-cluster-templates.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/cluster.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/cluster.yaml new file mode 100644 index 0000000..6755a8d --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/cluster.yaml @@ -0,0 +1,438 @@ +{{- $clustername := .Values.cluster.name -}} +apiVersion: provisioning.cattle.io/v1 +kind: Cluster +metadata: + {{- if .Values.cluster.labels }} + labels: +{{ toYaml .Values.cluster.labels | indent 4 }} + {{- end }} + {{- if .Values.cluster.annotations }} + annotations: +{{ toYaml .Values.cluster.annotations | indent 4 }} + {{- end }} + name: {{ .Values.cluster.name }} + namespace: fleet-default +spec: + {{- if .Values.cluster.config.agentEnvVars }} + agentEnvVars: +{{ toYaml .Values.cluster.config.agentEnvVars | indent 4 }} + {{- end }} + {{- if .Values.cloudCredentialSecretName }} + cloudCredentialSecretName: cattle-global-data:{{ .Values.cloudCredentialSecretName }} + {{- end }} + # clusterAPIConfig: + # clusterAgentDeploymentCustomization: + {{- if .Values.cluster.config.defaultClusterRoleForProjectMembers }} + defaultClusterRoleForProjectMembers: {{ .Values.cluster.config.defaultClusterRoleForProjectMembers }} + {{- end }} + {{- if .Values.cluster.config.defaultPodSecurityAdmissionConfigurationTemplateName }} + defaultPodSecurityAdmissionConfigurationTemplateName: {{ .Values.cluster.config.defaultPodSecurityAdmissionConfigurationTemplateName }} + {{- end }} + {{- if .Values.cluster.config.defaultPodSecurityPolicyTemplateName }} + defaultPodSecurityPolicyTemplateName: {{ .Values.cluster.config.defaultPodSecurityPolicyTemplateName }} + {{- end }} + enableNetworkPolicy: {{ .Values.cluster.config.enableNetworkPolicy }} + # fleetAgentDeploymentCustomization: + {{- if 
.Values.cluster.config.kubernetesVersion }} + kubernetesVersion: {{ .Values.cluster.config.kubernetesVersion }} + {{- end }} + {{- if eq .Values.cluster.config.localClusterAuthEndpoint.enabled true }} + localClusterAuthEndpoint: + enabled: {{ .Values.cluster.config.localClusterAuthEndpoint.enabled }} + fqdn: {{ .Values.cluster.config.localClusterAuthEndpoint.fqdn }} + caCerts: {{ .Values.cluster.config.localClusterAuthEndpoint.caCerts }} + {{- else }} + localClusterAuthEndpoint: + enabled: false + {{- end }} + # redeploySystemAgentGeneration: + rkeConfig: + {{- with $.Values.cluster.config.chartValues }} + chartValues: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with $.Values.cluster.config.additionalManifests }} + additionalManifest: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- if .Values.cluster.config.etcd }} + etcd: + disableSnapshots: {{ .Values.cluster.config.etcd.disableSnapshots }} + snapshotRetention: {{ .Values.cluster.config.etcd.snapshotRetention }} + snapshotScheduleCron: {{ .Values.cluster.config.etcd.snapshotScheduleCron }} + {{- if .Values.cluster.config.etcd.s3 }} + s3: + bucket: {{ .Values.cluster.config.etcd.s3.bucket }} + cloudCredentialName: cattle-global-data:{{ .Values.cluster.config.etcd.s3.cloudCredentialSecretName }} + {{- if .Values.cluster.config.etcd.s3.folder }} + folder: {{ .Values.cluster.config.etcd.s3.folder }} + {{- end }} + region: {{ .Values.cluster.config.etcd.s3.region }} + skipSSLVerify: {{ .Values.cluster.config.etcd.s3.skipSSLVerify }} + endpoint: {{ .Values.cluster.config.etcd.s3.endpoint }} + {{- if .Values.cluster.config.etcd.s3.endpointCA }} + endpointCA: |- +{{ .Values.cluster.config.etcd.s3.endpointCA | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + # etcdSnapshotCreate: + # etcdSnapshotRestore: + # infrastructureRef: + {{- if .Values.cluster.config.globalConfig }} + machineGlobalConfig: + {{- if .Values.cluster.config.globalConfig.cni }} + cni: {{ .Values.cluster.config.globalConfig.cni }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.cluster_cidr }} + cluster-cidr: {{ .Values.cluster.config.globalConfig.cluster_cidr }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.service_cidr }} + service-cidr: {{ .Values.cluster.config.globalConfig.service_cidr }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.docker }} + docker: {{ .Values.cluster.config.globalConfig.docker }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable }} + disable: {{ .Values.cluster.config.globalConfig.disable | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable_scheduler }} + disable-scheduler: {{ .Values.cluster.config.globalConfig.disable_scheduler }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable_cloud_controller }} + disable-cloud-controller: {{ .Values.cluster.config.globalConfig.disable_cloud_controller }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable_kube_proxy }} + disable-kube-proxy: {{ .Values.cluster.config.globalConfig.disable_kube_proxy }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.etcd_expose_metrics }} + etcd-expose-metrics: {{ .Values.cluster.config.globalConfig.etcd_expose_metrics }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.profile }} + profile: {{ .Values.cluster.config.globalConfig.profile }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.selinux }} + selinux: {{ .Values.cluster.config.globalConfig.selinux }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.tls_san }} + 
tls-san: {{ .Values.cluster.config.globalConfig.tls_san | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.token }} + token: {{ .Values.cluster.config.globalConfig.token }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.systemDefaultRegistry }} + system-default-registry: {{ .Values.cluster.config.globalConfig.systemDefaultRegistry }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.secrets_encryption }} + secrets-encryption: {{ .Values.cluster.config.globalConfig.secrets_encryption }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.write_kubeconfig_mode }} + write-kubeconfig-mode: {{ .Values.cluster.config.globalConfig.write_kubeconfig_mode }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.use_service_account_credentials }} + use-service-account-credentials: {{ .Values.cluster.config.globalConfig.use_service_account_credentials }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.protect_kernel_defaults }} + protect-kernel-defaults: {{ .Values.cluster.config.globalConfig.protect_kernel_defaults }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.cloud_provider_name }} + cloud-provider-name: {{ .Values.cluster.config.globalConfig.cloud_provider_name }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.cloud_provider_config }} + cloud-provider-config: {{ .Values.cluster.config.globalConfig.cloud_provider_config }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.kube_controller_manager_arg }} + kube-controller-manager-arg: {{ .Values.cluster.config.globalConfig.kube_controller_manager_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.kube_scheduler_arg }} + kube-scheduler-arg: {{ .Values.cluster.config.globalConfig.kube_scheduler_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.kube_apiserver_arg }} + kube-apiserver-arg: {{ .Values.cluster.config.globalConfig.kube_apiserver_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.kubelet_proxy_arg }} + kubelet-proxy-arg: {{ .Values.cluster.config.globalConfig.kubelet_proxy_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.kubelet_arg }} + kubelet-arg: {{ .Values.cluster.config.globalConfig.kubelet_arg | toRawJson }} + {{- end }} + {{- end }} + # machinePoolDefaults: + {{- if ne .Values.cloudprovider "custom" }} + machinePools: + {{- if .Values.nodepools }} {{ range $index, $nodepool := .Values.nodepools }} + - name: {{ $nodepool.name }} + quantity: {{ $nodepool.quantity }} + controlPlaneRole: {{ $nodepool.controlplane }} + etcdRole: {{ $nodepool.etcd }} + workerRole: {{ $nodepool.worker }} + {{- if $nodepool.labels }} + labels: +{{ toYaml $nodepool.labels | indent 8 }} + {{- end }} + {{- if $nodepool.taints }} + taints: +{{ toYaml $nodepool.taints | indent 8 }} + {{- end }} + machineConfigRef: + {{- if eq $.Values.cloudprovider "amazonec2" }} + kind: Amazonec2Config + {{- else if eq $.Values.cloudprovider "vsphere" }} + kind: VmwarevsphereConfig + {{- else if eq $.Values.cloudprovider "harvester" }} + kind: HarvesterConfig + {{- else if eq $.Values.cloudprovider "digitalocean" }} + kind: DigitaloceanConfig + {{- else if eq $.Values.cloudprovider "azure" }} + kind: AzureConfig + {{- else if eq $.Values.cloudprovider "elemental" }} + apiVersion: elemental.cattle.io/v1beta1 + kind: MachineInventorySelectorTemplate + {{- end}} + name: {{ $clustername }}-{{ $nodepool.name }} + displayName: {{ $nodepool.displayName | default $nodepool.name }} + {{- if 
$nodepool.drainBeforeDelete }} + drainBeforeDelete: {{ $nodepool.drainBeforeDelete }} + {{- end }} + {{- if $nodepool.drainBeforeDeleteTimeout }} + drainBeforeDeleteTimeout: {{ $nodepool.drainBeforeDeleteTimeout }} + {{- end }} + {{- if $nodepool.machineDeploymentLabels }} + machineDeploymentLabels: +{{ toYaml $nodepool.machineDeploymentLabels | indent 8 }} + {{- end }} + {{- if $nodepool.machineDeploymentAnnotations }} + machineDeploymentAnnotations: +{{ toYaml $nodepool.machineDeploymentAnnotations | indent 8 }} + {{- end }} + paused: {{ $nodepool.paused }} + {{- if $nodepool.rollingUpdate }} + rollingUpdate: + maxUnavailable: {{ $nodepool.rollingUpdate.maxUnavailable }} + maxSurge: {{ $nodepool.rollingUpdate.maxSurge }} + {{- end }} + {{- if $nodepool.unhealthyNodeTimeout }} + unhealthyNodeTimeout: {{ $nodepool.unhealthyNodeTimeout }} + {{- end }} + {{- end }} + {{- end }} + {{- if or .Values.cluster.config.controlPlaneConfig .Values.cluster.config.workerConfig}} + machineSelectorConfig: + {{- if .Values.cluster.config.controlPlaneConfig }} + - config: + {{- if .Values.cluster.config.controlPlaneConfig.cni }} + cni: {{ .Values.cluster.config.controlPlaneConfig.cni }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.docker }} + docker: {{ .Values.cluster.config.controlPlaneConfig.docker }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable }} + disable: {{ .Values.cluster.config.globalConfig.disable | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable_scheduler }} + disable-scheduler: {{ .Values.cluster.config.globalConfig.disable_scheduler }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable_cloud_controller }} + disable-cloud-controller: {{ .Values.cluster.config.globalConfig.disable_cloud_controller }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.disable_kube_proxy }} + disable-kube-proxy: {{ .Values.cluster.config.controlPlaneConfig.disable_kube_proxy }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.etcd_expose_metrics }} + etcd-expose-metrics: {{ .Values.cluster.config.controlPlaneConfig.etcd_expose_metrics }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.profile }} + profile: {{ .Values.cluster.config.controlPlaneConfig.profile }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.selinux }} + selinux: {{ .Values.cluster.config.controlPlaneConfig.selinux }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.tls_san }} + tls-san: {{ .Values.cluster.config.controlPlaneConfig.tls_san | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.token }} + token: {{ .Values.cluster.config.controlPlaneConfig.token }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.systemDefaultRegistry }} + system-default-registry: {{ .Values.cluster.config.controlPlaneConfig.systemDefaultRegistry }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.secrets_encryption }} + secrets-encryption: {{ .Values.cluster.config.controlPlaneConfig.secrets_encryption }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.write_kubeconfig_mode }} + write-kubeconfig-mode: {{ .Values.cluster.config.controlPlaneConfig.write_kubeconfig_mode }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.use_service_account_credentials }} + use-service-account-credentials: {{ .Values.cluster.config.controlPlaneConfig.use_service_account_credentials }} + {{- end }} + {{- if 
.Values.cluster.config.controlPlaneConfig.protect_kernel_defaults }} + protect-kernel-defaults: {{ .Values.cluster.config.controlPlaneConfig.protect_kernel_defaults }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.cloud_provider_name }} + cloud-provider-name: {{ .Values.cluster.config.controlPlaneConfig.cloud_provider_name }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.cloud_provider_config }} + cloud-provider-config: {{ .Values.cluster.config.controlPlaneConfig.cloud_provider_config }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.kube_controller_manager_arg }} + kube-controller-manager-arg: {{ .Values.cluster.config.controlPlaneConfig.kube_controller_manager_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.kube_scheduler_arg }} + kube-scheduler-arg: {{ .Values.cluster.config.controlPlaneConfig.kube_scheduler_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.kube_apiserver_arg }} + kube-apiserver-arg: {{ .Values.cluster.config.controlPlaneConfig.kube_apiserver_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.kubelet_proxy_arg }} + kubelet-proxy-arg: {{ .Values.cluster.config.controlPlaneConfig.kubelet_proxy_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.controlPlaneConfig.kubelet_arg }} + kubelet-arg: {{ .Values.cluster.config.controlPlaneConfig.kubelet_arg | toRawJson }} + {{- end }} + machineLabelSelector: + matchLabels: + node-role.kubernetes.io/control-plane: "true" + {{- end }} + {{- if .Values.cluster.config.workerConfig }} + - config: + {{- if .Values.cluster.config.workerConfig.cni }} + cni: {{ .Values.cluster.config.workerConfig.cni }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.docker }} + docker: {{ .Values.cluster.config.workerConfig.docker }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable }} + disable: {{ .Values.cluster.config.globalConfig.disable | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable_scheduler }} + disable-scheduler: {{ .Values.cluster.config.globalConfig.disable_scheduler }} + {{- end }} + {{- if .Values.cluster.config.globalConfig.disable_cloud_controller }} + disable-cloud-controller: {{ .Values.cluster.config.globalConfig.disable_cloud_controller }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.disable_kube_proxy }} + disable-kube-proxy: {{ .Values.cluster.config.workerConfig.disable_kube_proxy }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.etcd_expose_metrics }} + etcd-expose-metrics: {{ .Values.cluster.config.workerConfig.etcd_expose_metrics }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.profile }} + profile: {{ .Values.cluster.config.workerConfig.profile }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.selinux }} + selinux: {{ .Values.cluster.config.workerConfig.selinux }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.tls_san }} + tls-san: {{ .Values.cluster.config.workerConfig.tls_san | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.token }} + token: {{ .Values.cluster.config.workerConfig.token }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.systemDefaultRegistry }} + system-default-registry: {{ .Values.cluster.config.workerConfig.systemDefaultRegistry }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.secrets_encryption }} + secrets-encryption: {{ .Values.cluster.config.workerConfig.secrets_encryption }} + {{- 
end }} + {{- if .Values.cluster.config.workerConfig.write_kubeconfig_mode }} + write-kubeconfig-mode: {{ .Values.cluster.config.workerConfig.write_kubeconfig_mode }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.use_service_account_credentials }} + use-service-account-credentials: {{ .Values.cluster.config.workerConfig.use_service_account_credentials }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.protect_kernel_defaults }} + protect-kernel-defaults: {{ .Values.cluster.config.workerConfig.protect_kernel_defaults }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.cloud_provider_name }} + cloud-provider-name: {{ .Values.cluster.config.workerConfig.cloud_provider_name }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.cloud_provider_config }} + cloud-provider-config: {{ .Values.cluster.config.workerConfig.cloud_provider_config }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.kube_controller_manager_arg }} + kube-controller-manager-arg: {{ .Values.cluster.config.workerConfig.kube_controller_manager_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.kube_scheduler_arg }} + kube-scheduler-arg: {{ .Values.cluster.config.workerConfig.kube_scheduler_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.kube_apiserver_arg }} + kube-apiserver-arg: {{ .Values.cluster.config.workerConfig.kube_apiserver_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.kubelet_proxy_arg }} + kubelet-proxy-arg: {{ .Values.cluster.config.workerConfig.kubelet_proxy_arg | toRawJson }} + {{- end }} + {{- if .Values.cluster.config.workerConfig.kubelet_arg }} + kubelet-arg: {{ .Values.cluster.config.workerConfig.kubelet_arg | toRawJson }} + {{- end }} + machineLabelSelector: + matchLabels: + rke.cattle.io/worker-role: "true" + {{- end }} + {{- end }} + {{- end }} + # machineSelectorFiles: + # provisionGeneration: + {{- if and .Values.cluster.config.registries (eq .Values.cluster.config.registries.enabled true) }} + registries: + configs: + {{- range .Values.cluster.config.registries.configs }} + {{ .name }}: + authConfigSecretName: {{ .authConfigSecretName }} + caBundle: {{ .caBundle }} + insecureSkipVerify: {{ .insecureSkipVerify }} + tlsSecretName: {{ .tlsSecretName }} + {{- end }} + {{- if .Values.cluster.config.registries.mirrors }} + mirrors: + {{- range .Values.cluster.config.registries.mirrors }} + {{ .name | quote }}: + endpoint: + {{- range .endpoints }} + - {{ . 
}} + {{- end }} + {{- if .rewrite }} + rewrite: + {{- range $key, $value := .rewrite }} + "{{ $key }}": "{{ $value }}" + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + # rotateCertificates: + # rotateEncryptionKeys: + {{- if .Values.cluster.config.upgradeStrategy }} + upgradeStrategy: + controlPlaneConcurrency: {{ .Values.cluster.config.upgradeStrategy.controlPlaneConcurrency }} + {{- if eq .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.enabled true }} + controlPlaneDrainOptions: + enabled: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.enabled }} + deleteEmptyDirData: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.deleteEmptyDirData }} + disableEviction: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.disableEviction }} + force: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.force }} + gracePeriod: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.gracePeriod }} + ignoreDaemonSets: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.ignoreDaemonSets }} + ignoreErrors: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.ignoreErrors }} + skipWaitForDeleteTimeoutSeconds: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.skipWaitForDeleteTimeoutSeconds }} + timeout: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.timeout }} + {{- else }} + controlPlaneDrainOptions: + enabled: {{ .Values.cluster.config.upgradeStrategy.controlPlaneDrainOptions.enabled }} + {{- end }} + workerConcurrency: {{ .Values.cluster.config.upgradeStrategy.workerConcurrency }} + {{- if eq .Values.cluster.config.upgradeStrategy.workerDrainOptions.enabled true }} + workerDrainOptions: + enabled: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.enabled }} + deleteEmptyDirData: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.deleteEmptyDirData }} + disableEviction: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.disableEviction }} + force: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.force }} + gracePeriod: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.gracePeriod }} + ignoreDaemonSets: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.ignoreDaemonSets }} + ignoreErrors: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.ignoreErrors }} + skipWaitForDeleteTimeoutSeconds: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.skipWaitForDeleteTimeoutSeconds }} + timeout: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.timeout }} + {{- else }} + workerDrainOptions: + enabled: {{ .Values.cluster.config.upgradeStrategy.workerDrainOptions.enabled }} + {{- end }} + {{- end }} diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/clusterroletemplatebinding.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/clusterroletemplatebinding.yaml new file mode 100644 index 0000000..3708ff0 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/clusterroletemplatebinding.yaml @@ -0,0 +1,11 @@ +{{ $root := . 
}}
+{{- range $index, $member := .Values.clusterMembers }}
+---
+apiVersion: management.cattle.io/v3
+clusterName: c-m-{{ trunc 8 (sha256sum (printf "%s/%s" $root.Release.Namespace $root.Values.cluster.name)) }}
+kind: ClusterRoleTemplateBinding
+metadata:
+  name: ctrb-{{ trunc 8 (sha256sum (printf "%s/%s" $root.Release.Namespace $member.principalName )) }}
+  namespace: c-m-{{ trunc 8 (sha256sum (printf "%s/%s" $root.Release.Namespace $root.Values.cluster.name)) }}
+roleTemplateName: {{ $member.roleTemplateName }}
+userPrincipalName: {{ $member.principalName }}
+{{- end }}
diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/machinehealthcheck-master.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/machinehealthcheck-master.yaml
new file mode 100644
index 0000000..dba7331
--- /dev/null
+++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/machinehealthcheck-master.yaml
@@ -0,0 +1,33 @@
+{{- $clustername := .Values.cluster.name -}}
+{{- range .Values.nodepools }}
+{{- if eq .controlplane true }}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineHealthCheck
+metadata:
+  name: {{ $clustername }}-controlplane-healthcheck
+  namespace: fleet-default
+spec:
+  clusterName: {{ $clustername }}
+  selector:
+    matchLabels:
+      cluster.x-k8s.io/control-plane: 'true'
+      cluster.x-k8s.io/cluster-name: {{ $clustername }}
+  # SAFETY FUSE:
+  # "40%" prevents a 1-node control plane from trying to self-heal (which would kill it).
+  # If you have 3 nodes, this allows 1 to fail.
+  maxUnhealthy: 40%
+
+  # TIMEOUTS (v1beta1 uses duration strings like "10m" or "600s", not integers)
+  nodeStartupTimeout: 600s
+  unhealthyConditions:
+    - type: Ready
+      status: Unknown
+      timeout: 300s
+    - type: Ready
+      status: "False"
+      timeout: 300s
+{{- end }}
+{{- end }}
+
+
diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/machinehealthcheck-worker.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/machinehealthcheck-worker.yaml
new file mode 100644
index 0000000..06304e4
--- /dev/null
+++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/machinehealthcheck-worker.yaml
@@ -0,0 +1,25 @@
+{{- $clustername := .Values.cluster.name -}}
+{{- range .Values.nodepools }}
+{{- if eq .worker true }}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineHealthCheck
+metadata:
+  name: {{ $clustername }}-worker-healthcheck
+  namespace: fleet-default
+spec:
+  clusterName: {{ $clustername }}
+  selector:
+    matchLabels:
+      rke.cattle.io/worker-role: "true"
+      # scope the health check to machines belonging to this cluster
+      cluster.x-k8s.io/cluster-name: {{ $clustername }}
+  maxUnhealthy: 100%
+
+  nodeStartupTimeout: 10m
+  unhealthyConditions:
+    - type: Ready
+      status: "False"
+      timeout: 300s
+{{- end }}
+{{- end }}
diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/managedcharts.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/managedcharts.yaml
new file mode 100644
index 0000000..592f980
--- /dev/null
+++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/managedcharts.yaml
@@ -0,0 +1,201 @@
+{{- if .Values.addons.monitoring }}
+{{- if .Values.addons.monitoring.enabled }}
+apiVersion: management.cattle.io/v3
+kind: ManagedChart
+metadata:
+  name: monitoring-crd-{{ .Values.cluster.name }}
+  namespace: fleet-default
+spec:
+  chart: "rancher-monitoring-crd"
+  repoName: "rancher-charts"
+  releaseName: 
"rancher-monitoring-crd" + version: {{ .Values.addons.monitoring.version }} + {{- if .Values.addons.monitoring.values }} + values: +{{ toYaml .Values.addons.monitoring.values | indent 4 }} + {{- end }} + defaultNamespace: "cattle-monitoring-system" + targets: + - clusterName: {{ .Values.cluster.name }} +--- +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: monitoring-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "rancher-monitoring" + repoName: "rancher-charts" + releaseName: "rancher-monitoring" + version: {{ .Values.addons.monitoring.version }} + {{- if .Values.addons.monitoring.values }} + values: +{{ toYaml .Values.addons.monitoring.values | indent 4 }} + {{- end }} + defaultNamespace: "cattle-monitoring-system" + targets: + - clusterName: {{ .Values.cluster.name }} + diff: + comparePatches: + - apiVersion: admissionregistration.k8s.io/v1beta1 + kind: MutatingWebhookConfiguration + name: rancher-monitoring-admission + jsonPointers: + - /webhooks/0/failurePolicy + - apiVersion: admissionregistration.k8s.io/v1beta1 + kind: ValidatingWebhookConfiguration + name: rancher-monitoring-admission + jsonPointers: + - /webhooks/0/failurePolicy + - apiVersion: monitoring.coreos.com/v1 + kind: ServiceMonitor + name: rancher-monitoring-kubelet + namespace: kube-system + jsonPointers: + - /spec/endpoints +--- +{{- end }} +{{- end }} +{{- if .Values.addons.logging }} +{{- if .Values.addons.logging.enabled }} +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: logging-crd-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "rancher-logging-crd" + repoName: "rancher-charts" + releaseName: "rancher-logging-crd" + version: {{ .Values.addons.logging.version }} + {{- if .Values.addons.logging.values }} + values: +{{ toYaml .Values.addons.logging.values | indent 4 }} + {{- end }} + defaultNamespace: "cattle-logging-system" + targets: + - clusterName: {{ .Values.cluster.name }} +--- +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: logging-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "rancher-logging" + repoName: "rancher-charts" + releaseName: "rancher-logging" + version: {{ .Values.addons.logging.version }} + {{- if .Values.addons.logging.values }} + values: +{{ toYaml .Values.addons.logging.values | indent 4 }} + {{- end }} + defaultNamespace: "cattle-logging-system" + targets: + - clusterName: {{ .Values.cluster.name }} +--- +{{- end }} +{{- end }} +{{- if .Values.addons.longhorn }} +{{- if .Values.addons.longhorn.enabled }} +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: longhorn-crd-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "longhorn-crd" + repoName: "rancher-charts" + releaseName: "longhorn-crd" + version: {{ .Values.addons.longhorn.version }} + {{- if .Values.addons.longhorn.values }} + values: +{{ toYaml .Values.addons.longhorn.values | indent 4 }} + {{- end }} + defaultNamespace: "longhorn-system" + targets: + - clusterName: {{ .Values.cluster.name }} + diff: + comparePatches: + - apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + name: engineimages.longhorn.io + jsonPointers: + - /status/acceptedNames + - /status/conditions + - /status/storedVersions + - apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + name: nodes.longhorn.io + jsonPointers: + - /status/acceptedNames + - /status/conditions + - /status/storedVersions + - apiVersion: apiextensions.k8s.io/v1 + 
kind: CustomResourceDefinition + name: volumes.longhorn.io + jsonPointers: + - /status/acceptedNames + - /status/conditions + - /status/storedVersions +--- +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: longhorn-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "longhorn" + repoName: "rancher-charts" + releaseName: "longhorn" + version: {{ .Values.addons.longhorn.version }} + {{- if .Values.addons.longhorn.values }} + values: +{{ toYaml .Values.addons.longhorn.values | indent 4 }} + {{- end }} + defaultNamespace: "longhorn-system" + targets: + - clusterName: {{ .Values.cluster.name }} +--- +{{- end }} +{{- end }} +{{- if .Values.addons.neuvector }} +{{- if .Values.addons.neuvector.enabled }} +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: neuvector-crd-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "neuvector-crd" + repoName: "rancher-charts" + releaseName: "neuvector-crd" + version: {{ .Values.addons.neuvector.version }} + {{- if .Values.addons.neuvector.values }} + values: +{{ toYaml .Values.addons.neuvector.values | indent 4 }} + {{- end }} + defaultNamespace: "cattle-neuvector-system" + targets: + - clusterName: {{ .Values.cluster.name }} +--- +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: neuvector-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "neuvector" + repoName: "rancher-charts" + releaseName: "neuvector" + version: {{ .Values.addons.neuvector.version }} + {{- if .Values.addons.neuvector.values }} + values: +{{ toYaml .Values.addons.neuvector.values | indent 4 }} + {{- end }} + defaultNamespace: "cattle-neuvector-system" + targets: + - clusterName: {{ .Values.cluster.name }} +--- +{{- end }} +{{- end }} diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-aws.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-aws.yaml new file mode 100644 index 0000000..30c90ea --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-aws.yaml @@ -0,0 +1,251 @@ +{{- $clustername := .Values.cluster.name -}} +{{- if eq .Values.cloudprovider "amazonec2" }} +{{- range $index, $nodepool := .Values.nodepools }} +apiVersion: rke-machine-config.cattle.io/v1 +kind: Amazonec2Config +metadata: + name: {{ $clustername }}-{{ $nodepool.name }} + namespace: fleet-default +{{- if $nodepool.accessKey }} +accessKey: {{ $nodepool.accessKey }} +{{- end }} +{{- if $nodepool.ami }} +ami: {{ $nodepool.ami }} +{{- end }} +{{- if $nodepool.blockDurationMinutes }} +blockDurationMinutes: {{ $nodepool.blockDurationMinutes }} +{{- end }} +{{- if $nodepool.deviceName }} +deviceName: {{ $nodepool.deviceName }} +{{- end }} +{{- if $nodepool.encryptEbsVolume }} +encryptEbsVolume: {{ $nodepool.encryptEbsVolume }} +{{- end }} +{{- if $nodepool.endpoint }} +endpoint: {{ $nodepool.endpoint }} +{{- end }} +{{- if $nodepool.httpEndpoint }} +httpEndpoint: {{ $nodepool.httpEndpoint }} +{{- end }} +{{- if $nodepool.httpTokens }} +httpTokens: {{ $nodepool.httpTokens }} +{{- end }} +{{- if $nodepool.iamInstanceProfile }} +iamInstanceProfile: {{ $nodepool.iamInstanceProfile }} +{{- end }} +{{- if $nodepool.insecureTransport }} +insecureTransport: {{ $nodepool.insecureTransport }} +{{- end }} +{{- if $nodepool.instanceType }} +instanceType: {{ $nodepool.instanceType }} +{{- end }} +{{- if $nodepool.keypairName }} +keypairName: {{ 
$nodepool.keypairName }} +{{- end }} +{{- if $nodepool.kmsKey }} +kmsKey: {{ $nodepool.kmsKey }} +{{- end }} +{{- if $nodepool.monitoring }} +monitoring: {{ $nodepool.monitoring }} +{{- end }} +{{- if $nodepool.openPort}} +openPort: +{{- range $i, $port := $nodepool.openPort }} +- {{ $port | squote }} +{{- end }} +{{- end }} +{{- if $nodepool.privateAddressOnly }} +privateAddressOnly: {{ $nodepool.privateAddressOnly }} +{{- end }} +{{- if $nodepool.region }} +region: {{ $nodepool.region }} +{{- end }} +{{- if $nodepool.requestSpotInstance }} +requestSpotInstance: {{ $nodepool.requestSpotInstance }} +{{- end }} +{{- if $nodepool.retries }} +retries: {{ $nodepool.retries | squote }} +{{- end }} +{{- if $nodepool.rootSize }} +rootSize: {{ $nodepool.rootSize | squote }} +{{- end }} +{{- if $nodepool.secretKey }} +secretKey: {{ $nodepool.secretKey }} +{{- end }} +securityGroup: +{{- if $nodepool.createSecurityGroup }} +- rancher-nodes +{{- else }} +{{ toYaml $nodepool.securityGroups }} +{{- end }} +{{- if $nodepool.securityGroupReadonly }} +securityGroupReadonly: {{ $nodepool.securityGroupReadonly }} +{{- end }} +{{- if $nodepool.sessionToken }} +sessionToken: {{ $nodepool.sessionToken }} +{{- end }} +{{- if $nodepool.spotPrice }} +spotPrice: {{ $nodepool.spotPrice }} +{{- end }} +{{- if $nodepool.sshKeyContents }} +sshKeyContents: {{ $nodepool.sshKeyContents }} +{{- end }} +{{- if $nodepool.sshUser }} +sshUser: {{ $nodepool.sshUser }} +{{- end }} +{{- if $nodepool.subnetId }} +subnetId: {{ $nodepool.subnetId }} +{{- end }} +{{- if $nodepool.tags }} +tags: {{ $nodepool.tags }} +{{- end }} +{{- if $nodepool.useEbsOptimizedInstance }} +useEbsOptimizedInstance: {{ $nodepool.useEbsOptimizedInstance }} +{{- end }} +{{- if $nodepool.usePrivateAddress }} +usePrivateAddress: {{ $nodepool.usePrivateAddress }} +{{- end }} +{{- if $nodepool.userData }} +userdata: {{- $nodepool.userData | toYaml | indent 1 }} +{{- end }} +{{- if $nodepool.volumeType }} +volumeType: {{ $nodepool.volumeType }} +{{- end }} +{{- if $nodepool.vpcId }} +vpcId: {{ $nodepool.vpcId }} +{{- end }} +{{- if $nodepool.zone }} +zone: {{ $nodepool.zone }} +{{- end }} +--- +{{- end }} +{{ $nodepool := .Values.nodepool }} +{{- if $nodepool }} +apiVersion: rke-machine-config.cattle.io/v1 +kind: Amazonec2Config +metadata: + name: {{ $clustername }}-{{ $nodepool.name }} + namespace: fleet-default +common: +{{- if $nodepool.labels }} + labels: +{{ toYaml $nodepool.labels | indent 4 }} +{{- end }} +{{- if $nodepool.taints }} + taints: +{{ toYaml $nodepool.taints | indent 4 }} +{{- end }} +{{- if $nodepool.accessKey }} +accessKey: {{ $nodepool.accessKey }} +{{- end }} +{{- if $nodepool.ami }} +ami: {{ $nodepool.ami }} +{{- end }} +{{- if $nodepool.blockDurationMinutes }} +blockDurationMinutes: {{ $nodepool.blockDurationMinutes }} +{{- end }} +{{- if $nodepool.deviceName }} +deviceName: {{ $nodepool.deviceName }} +{{- end }} +{{- if $nodepool.encryptEbsVolume }} +encryptEbsVolume: {{ $nodepool.encryptEbsVolume }} +{{- end }} +{{- if $nodepool.endpoint }} +endpoint: {{ $nodepool.endpoint }} +{{- end }} +{{- if $nodepool.httpEndpoint }} +httpEndpoint: {{ $nodepool.httpEndpoint }} +{{- end }} +{{- if $nodepool.httpTokens }} +httpTokens: {{ $nodepool.httpTokens }} +{{- end }} +{{- if $nodepool.iamInstanceProfile }} +iamInstanceProfile: {{ $nodepool.iamInstanceProfile }} +{{- end }} +{{- if $nodepool.insecureTransport }} +insecureTransport: {{ $nodepool.insecureTransport }} +{{- end }} +{{- if $nodepool.instanceType }} +instanceType: {{ 
$nodepool.instanceType }} +{{- end }} +{{- if $nodepool.keypairName }} +keypairName: {{ $nodepool.keypairName }} +{{- end }} +{{- if $nodepool.kmsKey }} +kmsKey: {{ $nodepool.kmsKey }} +{{- end }} +{{- if $nodepool.monitoring }} +monitoring: {{ $nodepool.monitoring }} +{{- end }} +{{- if $nodepool.openPort}} +openPort: +{{- range $i, $port := $nodepool.openPort }} +- {{ $port | squote }} +{{- end }} +{{- end }} +{{- if $nodepool.privateAddressOnly }} +privateAddressOnly: {{ $nodepool.privateAddressOnly }} +{{- end }} +{{- if $nodepool.region }} +region: {{ $nodepool.region }} +{{- end }} +{{- if $nodepool.requestSpotInstance }} +requestSpotInstance: {{ $nodepool.requestSpotInstance }} +{{- end }} +{{- if $nodepool.retries }} +retries: {{ $nodepool.retries | squote }} +{{- end }} +{{- if $nodepool.rootSize }} +rootSize: {{ $nodepool.rootSize | squote }} +{{- end }} +{{- if $nodepool.secretKey }} +secretKey: {{ $nodepool.secretKey }} +{{- end }} +{{- if $nodepool.createSecurityGroup }} +securityGroup: +- rancher-nodes +{{- else if $nodepool.securityGroups }} +securityGroup: +{{ toYaml $nodepool.securityGroups }} +{{- end }} +{{- if $nodepool.securityGroupReadonly }} +securityGroupReadonly: {{ $nodepool.securityGroupReadonly }} +{{- end }} +{{- if $nodepool.sessionToken }} +sessionToken: {{ $nodepool.sessionToken }} +{{- end }} +{{- if $nodepool.spotPrice }} +spotPrice: {{ $nodepool.spotPrice }} +{{- end }} +{{- if $nodepool.sshKeyContents }} +sshKeyContents: {{ $nodepool.sshKeyContents }} +{{- end }} +{{- if $nodepool.sshUser }} +sshUser: {{ $nodepool.sshUser }} +{{- end }} +{{- if $nodepool.subnetId }} +subnetId: {{ $nodepool.subnetId }} +{{- end }} +{{- if $nodepool.tags }} +tags: {{ $nodepool.tags }} +{{- end }} +{{- if $nodepool.useEbsOptimizedInstance }} +useEbsOptimizedInstance: {{ $nodepool.useEbsOptimizedInstance }} +{{- end }} +{{- if $nodepool.usePrivateAddress }} +usePrivateAddress: {{ $nodepool.usePrivateAddress }} +{{- end }} +{{- if $nodepool.userData }} +userdata: {{- $nodepool.userData | toYaml | indent 1 }} +{{- end }} +{{- if $nodepool.volumeType }} +volumeType: {{ $nodepool.volumeType }} +{{- end }} +{{- if $nodepool.vpcId }} +vpcId: {{ $nodepool.vpcId }} +{{- end }} +{{- if $nodepool.zone }} +zone: {{ $nodepool.zone }} +{{- end }} +{{- end }} +{{- end }} diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-azure.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-azure.yaml new file mode 100644 index 0000000..30d3526 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-azure.yaml @@ -0,0 +1,95 @@ +{{- $clustername := .Values.cluster.name -}} +{{- if eq .Values.cloudprovider "azure" }} +{{- range $index, $nodepool := .Values.nodepools }} +apiVersion: rke-machine-config.cattle.io/v1 +kind: AzureConfig +metadata: + name: {{ $clustername }}-{{ $nodepool.name }} + namespace: fleet-default +common: +{{- if $nodepool.labels }} + labels: +{{ toYaml $nodepool.labels | indent 4 }} +{{- end }} +{{- if $nodepool.taints }} + taints: +{{ toYaml $nodepool.taints | indent 4 }} +{{- end }} +availabilitySet: {{ $nodepool.availabilitySet }} +clientId: {{ $nodepool.clientId }} +customData: {{ $nodepool.customData }} +diskSize: {{ $nodepool.diskSize }} +dns: {{ $nodepool.dns }} +environment: {{ $nodepool.environment }} +faultDomainCount: {{ $nodepool.faultDomainCount }} +image: {{ $nodepool.image }} +location: {{ $nodepool.location }} 
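
+# NOTE: unlike the EC2 and DigitalOcean configs, the Azure fields in this
+# template are not wrapped in `if` guards, so any nodepool value left unset
+# in values.yaml renders as an empty string here.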
+managedDisks: {{ $nodepool.managedDisks }} +noPublicIp: {{ $nodepool.noPublicIp }} +{{- if $nodepool.openPort}} +openPort: +{{- range $i, $port := $nodepool.openPort }} +- {{ $port }} +{{- end }} +{{- end }} +privateIpAddress: {{ $nodepool.privateIpAddress }} +resourceGroup: {{ $nodepool.resourceGroup }} +size: {{ $nodepool.size }} +sshUser: {{ $nodepool.sshUser }} +staticPublicIp: {{ $nodepool.staticPublicIp }} +storageType: {{ $nodepool.storageType }} +subnet: {{ $nodepool.subnet }} +subnetPrefix: {{ $nodepool.subnetPrefix }} +subscriptionId: {{ $nodepool.subscriptionId }} +updateDomainCount: {{ $nodepool.updateDomainCount }} +usePrivateIp: {{ $nodepool.usePrivateIp }} +vnet: {{ $nodepool.vnet }} +--- +{{- end }} +{{ $nodepool := .Values.nodepool }} +{{- if $nodepool }} +apiVersion: rke-machine-config.cattle.io/v1 +kind: AzureConfig +metadata: + name: {{ $clustername }}-{{ $nodepool.name }} + namespace: fleet-default +common: +{{- if $nodepool.labels }} + labels: +{{ toYaml $nodepool.labels | indent 4 }} +{{- end }} +{{- if $nodepool.taints }} + taints: +{{ toYaml $nodepool.taints | indent 4 }} +{{- end }} +availabilitySet: {{ $nodepool.availabilitySet }} +clientId: {{ $nodepool.clientId }} +customData: {{ $nodepool.customData }} +diskSize: {{ $nodepool.diskSize }} +dns: {{ $nodepool.dns }} +environment: {{ $nodepool.environment }} +faultDomainCount: {{ $nodepool.faultDomainCount }} +image: {{ $nodepool.image }} +location: {{ $nodepool.location }} +managedDisks: {{ $nodepool.managedDisks }} +noPublicIp: {{ $nodepool.noPublicIp }} +{{- if $nodepool.openPort}} +openPort: +{{- range $i, $port := $nodepool.openPort }} +- {{ $port }} +{{- end }} +{{- end }} +privateIpAddress: {{ $nodepool.privateIpAddress }} +resourceGroup: {{ $nodepool.resourceGroup }} +size: {{ $nodepool.size }} +sshUser: {{ $nodepool.sshUser }} +staticPublicIp: {{ $nodepool.staticPublicIp }} +storageType: {{ $nodepool.storageType }} +subnet: {{ $nodepool.subnet }} +subnetPrefix: {{ $nodepool.subnetPrefix }} +subscriptionId: {{ $nodepool.subscriptionId }} +updateDomainCount: {{ $nodepool.updateDomainCount }} +usePrivateIp: {{ $nodepool.usePrivateIp }} +vnet: {{ $nodepool.vnet }} +{{- end }} +{{- end }} diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-do.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-do.yaml new file mode 100644 index 0000000..5c68eb7 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-do.yaml @@ -0,0 +1,103 @@ +{{- $clustername := .Values.cluster.name -}} +{{- if eq .Values.cloudprovider "digitalocean" }} +{{- range $index, $nodepool := .Values.nodepools }} +apiVersion: rke-machine-config.cattle.io/v1 +kind: DigitaloceanConfig +metadata: + name: {{ $clustername }}-{{ $nodepool.name }} + namespace: fleet-default +{{- if $nodepool.accessToken }} +accessToken: {{ $nodepool.accessToken }} +{{- end }} +{{- if $nodepool.backups }} +backups: {{ $nodepool.backups }} +{{- end }} +{{- if $nodepool.image }} +image: {{ $nodepool.image }} +{{- end }} +{{- if $nodepool.ipv6 }} +ipv6: {{ $nodepool.ipv6 }} +{{- end }} +{{- if $nodepool.monitoring }} +monitoring: {{ $nodepool.monitoring }} +{{- end }} +{{- if $nodepool.privateNetworking }} +privateNetworking: {{ $nodepool.privateNetworking }} +{{- end }} +{{- if $nodepool.region }} +region: {{ $nodepool.region }} +{{- end }} +{{- if $nodepool.size }} +size: {{ $nodepool.size }} +{{- end }} +{{- if 
$nodepool.sshKeyContents }}
+sshKeyContents: {{ $nodepool.sshKeyContents }}
+{{- end }}
+{{- if $nodepool.sshKeyFingerprint }}
+sshKeyFingerprint: {{ $nodepool.sshKeyFingerprint }}
+{{- end }}
+{{- if $nodepool.sshPort }}
+sshPort: {{ $nodepool.sshPort | squote }}
+{{- end }}
+{{- if $nodepool.sshUser }}
+sshUser: {{ $nodepool.sshUser }}
+{{- end }}
+{{- if $nodepool.tags }}
+tags: {{ $nodepool.tags }}
+{{- end }}
+{{- if $nodepool.userData }}
+userdata: {{- $nodepool.userData | toYaml | indent 1 }}
+{{- end }}
+---
+{{- end }}
+{{ $nodepool := .Values.nodepool }}
+{{- if $nodepool }}
+apiVersion: rke-machine-config.cattle.io/v1
+kind: DigitaloceanConfig
+metadata:
+  name: {{ $clustername }}-{{ $nodepool.name }}
+  namespace: fleet-default
+{{- if $nodepool.accessToken }}
+accessToken: {{ $nodepool.accessToken }}
+{{- end }}
+{{- if $nodepool.backups }}
+backups: {{ $nodepool.backups }}
+{{- end }}
+{{- if $nodepool.image }}
+image: {{ $nodepool.image }}
+{{- end }}
+{{- if $nodepool.ipv6 }}
+ipv6: {{ $nodepool.ipv6 }}
+{{- end }}
+{{- if $nodepool.monitoring }}
+monitoring: {{ $nodepool.monitoring }}
+{{- end }}
+{{- if $nodepool.privateNetworking }}
+privateNetworking: {{ $nodepool.privateNetworking }}
+{{- end }}
+{{- if $nodepool.region }}
+region: {{ $nodepool.region }}
+{{- end }}
+{{- if $nodepool.size }}
+size: {{ $nodepool.size }}
+{{- end }}
+{{- if $nodepool.sshKeyContents }}
+sshKeyContents: {{ $nodepool.sshKeyContents }}
+{{- end }}
+{{- if $nodepool.sshKeyFingerprint }}
+sshKeyFingerprint: {{ $nodepool.sshKeyFingerprint }}
+{{- end }}
+{{- if $nodepool.sshPort }}
+sshPort: {{ $nodepool.sshPort | squote }}
+{{- end }}
+{{- if $nodepool.sshUser }}
+sshUser: {{ $nodepool.sshUser }}
+{{- end }}
+{{- if $nodepool.tags }}
+tags: {{ $nodepool.tags }}
+{{- end }}
+{{- if $nodepool.userData }}
+userdata: {{- $nodepool.userData | toYaml | indent 1 }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-elemental.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-elemental.yaml
new file mode 100644
index 0000000..81335c8
--- /dev/null
+++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-elemental.yaml
@@ -0,0 +1,15 @@
+{{- $clustername := .Values.cluster.name -}}
+{{- if eq .Values.cloudprovider "elemental" }}
+{{- range $index, $nodepool := .Values.nodepools }}
+apiVersion: elemental.cattle.io/v1beta1
+kind: MachineInventorySelectorTemplate
+metadata:
+  name: {{ $clustername }}-{{ $nodepool.name }}
+  namespace: fleet-default
+spec:
+  template:
+    spec:
+      selector:
+        {{- toYaml $nodepool.selector | nindent 8 }}
+{{- end }}
+{{- end }}
diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-harvester.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-harvester.yaml
new file mode 100644
index 0000000..c7a35eb
--- /dev/null
+++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-harvester.yaml
@@ -0,0 +1,166 @@
+{{- $clustername := .Values.cluster.name -}}
+{{- if eq .Values.cloudprovider "harvester" }}
+{{- range $index, $nodepool := .Values.nodepools }}
+apiVersion: rke-machine-config.cattle.io/v1
+kind: HarvesterConfig
+metadata:
+  name: {{ $clustername }}-{{ $nodepool.name }}
+  namespace: fleet-default
+{{- if $nodepool.cloudConfig }}
+cloudConfig: {{ $nodepool.cloudConfig }}
+{{- end }}
+{{- if $nodepool.clusterId }}
+clusterId: {{ $nodepool.clusterId }}
+{{- end }}
+{{- if $nodepool.clusterType }}
+clusterType: {{ $nodepool.clusterType }}
+{{- end }}
+{{- if $nodepool.cpuCount }}
+cpuCount: {{ $nodepool.cpuCount | squote }}
+{{- end }}
+{{- if $nodepool.diskBus }}
+diskBus: {{ $nodepool.diskBus }}
+{{- end }}
+{{- if $nodepool.diskInfo }}
+diskInfo: {{ $nodepool.diskInfo }}
+{{- end }}
+{{- if $nodepool.diskSize }}
+diskSize: {{ $nodepool.diskSize | squote }}
+{{- end }}
+{{- if $nodepool.imageName }}
+imageName: {{ $nodepool.imageName }}
+{{- end }}
+{{- if $nodepool.keyPairName }}
+keyPairName: {{ $nodepool.keyPairName }}
+{{- end }}
+{{- if $nodepool.kubeconfigContent }}
+kubeconfigContent: {{- $nodepool.kubeconfigContent | toYaml }}
+{{- end }}
+{{- if $nodepool.memorySize }}
+memorySize: {{ $nodepool.memorySize | squote }}
+{{- end }}
+{{- if $nodepool.networkData }}
+networkData: {{- $nodepool.networkData | toYaml | indent 1 }}
+{{- end }}
+{{- if $nodepool.networkInfo }}
+networkInfo: {{ $nodepool.networkInfo }}
+{{- end }}
+{{- if $nodepool.networkModel }}
+networkModel: {{ $nodepool.networkModel }}
+{{- end }}
+{{- if $nodepool.networkName }}
+networkName: {{ $nodepool.networkName }}
+{{- end }}
+{{- if $nodepool.networkType }}
+networkType: {{ $nodepool.networkType }}
+{{- end }}
+{{- if $nodepool.sshPassword }}
+sshPassword: {{ $nodepool.sshPassword }}
+{{- end }}
+{{- if $nodepool.sshPort }}
+sshPort: {{ $nodepool.sshPort | squote }}
+{{- end }}
+{{- if $nodepool.sshPrivateKeyPath }}
+sshPrivateKeyPath: {{ $nodepool.sshPrivateKeyPath }}
+{{- end }}
+{{- if $nodepool.sshUser }}
+sshUser: {{ $nodepool.sshUser }}
+{{- end }}
+{{- if $nodepool.userData }}
+userData: {{ $nodepool.userData | toYaml }}
+{{- end }}
+{{- if $nodepool.vmAffinity }}
+vmAffinity: {{ $nodepool.vmAffinity }}
+{{- end }}
+{{- if $nodepool.vmNamespace }}
+vmNamespace: {{ $nodepool.vmNamespace }}
+{{- end }}
+---
+{{- end }}
+{{ $nodepool := .Values.nodepool }}
+{{- if $nodepool }}
+apiVersion: rke-machine-config.cattle.io/v1
+kind: HarvesterConfig
+metadata:
+  name: {{ $clustername }}-{{ $nodepool.name }}
+  namespace: fleet-default
+common:
+{{- if $nodepool.labels }}
+  labels:
+{{ toYaml $nodepool.labels | indent 4 }}
+{{- end }}
+{{- if $nodepool.taints }}
+  taints:
+{{ toYaml $nodepool.taints | indent 4 }}
+{{- end }}
+{{- if $nodepool.cloudConfig }}
+cloudConfig: {{ $nodepool.cloudConfig }}
+{{- end }}
+{{- if $nodepool.clusterId }}
+clusterId: {{ $nodepool.clusterId }}
+{{- end }}
+{{- if $nodepool.clusterType }}
+clusterType: {{ $nodepool.clusterType }}
+{{- end }}
+{{- if $nodepool.cpuCount }}
+cpuCount: {{ $nodepool.cpuCount | squote }}
+{{- end }}
+{{- if $nodepool.diskBus }}
+diskBus: {{ $nodepool.diskBus }}
+{{- end }}
+{{- if $nodepool.diskInfo }}
+diskInfo: {{ $nodepool.diskInfo }}
+{{- end }}
+{{- if $nodepool.diskSize }}
+diskSize: {{ $nodepool.diskSize | squote }}
+{{- end }}
+{{- if $nodepool.imageName }}
+imageName: {{ $nodepool.imageName }}
+{{- end }}
+{{- if $nodepool.keyPairName }}
+keyPairName: {{ $nodepool.keyPairName }}
+{{- end }}
+{{- if $nodepool.kubeconfigContent }}
+kubeconfigContent: {{- $nodepool.kubeconfigContent | toYaml }}
+{{- end }}
+{{- if $nodepool.memorySize }}
+memorySize: {{ $nodepool.memorySize | squote }}
+{{- end }}
+{{- if $nodepool.networkData }}
+networkData: {{- $nodepool.networkData | toYaml | indent 1 }}
+{{- end }}
+{{- if $nodepool.networkInfo }}
+networkInfo: {{ $nodepool.networkInfo }}
+{{- end }}
+{{- if $nodepool.networkModel }}
+networkModel: {{ $nodepool.networkModel }} +{{- end }} +{{- if $nodepool.networkName }} +networkName: {{ $nodepool.networkName }} +{{- end }} +{{- if $nodepool.networkType }} +networkType: {{ $nodepool.networkType }} +{{- end }} +{{- if $nodepool.sshPassword }} +sshPassword: {{ $nodepool.sshPassword }} +{{- end }} +{{- if $nodepool.sshPort }} +sshPort: {{ $nodepool.sshPort | squote }} +{{- end }} +{{- if $nodepool.sshPrivateKeyPath }} +sshPrivateKeyPath: {{ $nodepool.sshPrivateKeyPath }} +{{- end }} +{{- if $nodepool.sshUser }} +sshUser: {{ $nodepool.sshUser }} +{{- end }} +{{- if $nodepool.userData }} +userData: {{ $nodepool.userData | toYaml }} +{{- end }} +{{- if $nodepool.vmAffinity }} +vmAffinity: {{ $nodepool.vmAffinity }} +{{- end }} +{{- if $nodepool.vmNamespace }} +vmNamespace: {{ $nodepool.vmNamespace }} +{{- end }} +{{- end }} +{{- end }} diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-vsphere.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-vsphere.yaml new file mode 100644 index 0000000..c8f40a6 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/templates/nodeconfig-vsphere.yaml @@ -0,0 +1,97 @@ +{{- $clustername := .Values.cluster.name -}} +{{- if eq .Values.cloudprovider "vsphere" }} +{{- range $index, $nodepool := .Values.nodepools }} +apiVersion: rke-machine-config.cattle.io/v1 +kind: VmwarevsphereConfig +metadata: + name: {{ $clustername }}-{{ $nodepool.name }} + namespace: fleet-default +common: +{{- if $nodepool.labels }} + labels: +{{ toYaml $nodepool.labels | indent 4 }} +{{- end }} +{{- if $nodepool.taints }} + taints: +{{ toYaml $nodepool.taints | indent 4 }} +{{- end }} +{{- if $nodepool.cfgparam }} +cfgparam: {{ $nodepool.cfgparam }} +{{- end }} +cloneFrom: {{ $nodepool.cloneFrom }} +cloudConfig: |- +{{ $nodepool.cloudConfig | indent 2 }} +cloudinit: {{ $nodepool.cloudinit }} +contentLibrary: {{ $nodepool.contentLibrary }} +cpuCount: {{ $nodepool.cpuCount | squote }} +creationType: {{ $nodepool.creationType }} +customAttribute: {{ $nodepool.customAttribute }} +datacenter: {{ $nodepool.datacenter }} +datastore: {{ $nodepool.datastore }} +datastoreCluster: {{ $nodepool.datastoreCluster }} +diskSize: {{ $nodepool.diskSize | squote }} +folder: {{ $nodepool.folder }} +hostsystem: {{ $nodepool.hostsystem }} +memorySize: {{ $nodepool.memorySize | squote }} +network: {{ $nodepool.network }} +pool: {{ $nodepool.pool }} +sshPort: {{ $nodepool.sshPort | squote }} +sshUser: {{ $nodepool.sshUser }} +sshUserGroup: {{ $nodepool.sshUserGroup }} +tag: {{ $nodepool.tag }} +vappIpallocationpolicy: {{ $nodepool.vappIpallocationpolicy }} +vappIpprotocol: {{ $nodepool.vappIpprotocol }} +vappProperty: {{ $nodepool.vappProperty }} +vappTransport: {{ $nodepool.vappTransport }} +vcenter: {{ $nodepool.vcenter }} +vcenterPort: {{ $nodepool.vcenterPort | squote }} +--- +{{- end }} +{{ $nodepool := .Values.nodepool }} +{{- if $nodepool }} +apiVersion: rke-machine-config.cattle.io/v1 +kind: VmwarevsphereConfig +metadata: + name: {{ $clustername }}-{{ $nodepool.name }} + namespace: fleet-default +common: +{{- if $nodepool.labels }} + labels: +{{ toYaml $nodepool.labels | indent 4 }} +{{- end }} +{{- if $nodepool.taints }} + taints: +{{ toYaml $nodepool.taints | indent 4 }} +{{- end }} +{{- if $nodepool.cfgparam }} +cfgparam: {{ $nodepool.cfgparam }} +{{- end }} +cloneFrom: {{ $nodepool.cloneFrom }} +cloudConfig: |- +{{ $nodepool.cloudConfig | indent 2 }} 
+cloudinit: {{ $nodepool.cloudinit }} +contentLibrary: {{ $nodepool.contentLibrary }} +cpuCount: {{ $nodepool.cpuCount | squote }} +creationType: {{ $nodepool.creationType }} +customAttribute: {{ $nodepool.customAttribute }} +datacenter: {{ $nodepool.datacenter }} +datastore: {{ $nodepool.datastore }} +datastoreCluster: {{ $nodepool.datastoreCluster }} +diskSize: {{ $nodepool.diskSize | squote }} +folder: {{ $nodepool.folder }} +hostsystem: {{ $nodepool.hostsystem }} +memorySize: {{ $nodepool.memorySize | squote }} +network: {{ $nodepool.network }} +pool: {{ $nodepool.pool }} +sshPort: {{ $nodepool.sshPort | squote }} +sshUser: {{ $nodepool.sshUser }} +sshUserGroup: {{ $nodepool.sshUserGroup }} +tag: {{ $nodepool.tag }} +vappIpallocationpolicy: {{ $nodepool.vappIpallocationpolicy }} +vappIpprotocol: {{ $nodepool.vappIpprotocol }} +vappProperty: {{ $nodepool.vappProperty }} +vappTransport: {{ $nodepool.vappTransport }} +vcenter: {{ $nodepool.vcenter }} +vcenterPort: {{ $nodepool.vcenterPort | squote }} +{{- end }} +{{- end }} diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/values.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/values.yaml new file mode 100644 index 0000000..0421ba9 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/charts/cluster-templates/values.yaml @@ -0,0 +1,433 @@ +# amazonec2, azure, digitalocean, harvester, vsphere, custom +cloudprovider: harvester + +# cloud provider credentials +cloudCredentialSecretName: cc-mrklm + +# rancher manager url +rancher: + cattle: + url: rancher-mgmt.product.lan + +# cluster values +cluster: + + name: default-cluster + # labels: + # key: value + config: + kubernetesVersion: v1.33.5+rke2r1 + enableNetworkPolicy: true + localClusterAuthEndpoint: + enabled: false + + # Pod Security Standard (Replaces PSP) + defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted" + + globalConfig: + systemDefaultRegistry: docker.io + cni: canal + docker: false + disable_scheduler: false + disable_cloud_controller: false + disable_kube_proxy: false + etcd_expose_metrics: false + profile: 'cis' + selinux: false + secrets_encryption: true + write_kubeconfig_mode: 0600 + use_service_account_credentials: false + protect_kernel_defaults: true + + kube_apiserver_arg: + - "service-account-extend-token-expiration=false" + - "anonymous-auth=false" + - "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs" + - "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml" + - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml" + - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log" + - "audit-log-maxage=30" + - "audit-log-maxbackup=10" + - "audit-log-maxsize=100" + + kubelet_arg: + # Strong Ciphers (CIS 4.2.12) + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + # PID Limit (CIS 4.2.13) + - "pod-max-pids=4096" + # Seccomp Default (CIS 4.2.14) + - "seccomp-default=true" + - "protect-kernel-defaults=true" + - "make-iptables-util-chains=true" + + upgradeStrategy: + controlPlaneConcurrency: 10% + controlPlaneDrainOptions: + enabled: false + workerConcurrency: 10% + workerDrainOptions: + enabled: false + +# node and nodepool(s) values +nodepools: + - name: control-plane-nodes + displayName: cp-nodes + quantity: 1 + etcd: true 
+ controlplane: true + worker: false + paused: false + cpuCount: 4 + diskSize: 40 + imageName: vanderlande/image-qhtpc + memorySize: 8 + networkName: vanderlande/vm-lan + sshUser: rancher + vmNamespace: vanderlande + + # --------------------------------------------------------- + # Cloud-Init: Creates the Security Files + # --------------------------------------------------------- + userData: &userData | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - qemu-guest-agent + - yq + - jq + - curl + - wget + + bootcmd: + - sysctl -w net.ipv6.conf.all.disable_ipv6=1 + - sysctl -w net.ipv6.conf.default.disable_ipv6=1 + + write_files: + # ---------------------------------------------------------------- + # 1. CNI Permission Fix Script & Cron (CIS 1.1.9 Persistence) + # ---------------------------------------------------------------- + - path: /usr/local/bin/fix-cni-perms.sh + permissions: '0700' + owner: root:root + content: | + #!/bin/bash + # Wait 60s on boot for RKE2 to write files + [ "$1" == "boot" ] && sleep 60 + + # Enforce 600 on CNI files (CIS 1.1.9) + if [ -d /etc/cni/net.d ]; then + find /etc/cni/net.d -type f -exec chmod 600 {} \; + fi + if [ -d /var/lib/cni/networks ]; then + find /var/lib/cni/networks -type f -exec chmod 600 {} \; + fi + + # Every RKE2 service restart can reset CNI file permissions, so we run + # this script on reboot and daily via cron to maintain CIS compliance. + + - path: /etc/cron.d/cis-cni-fix + permissions: '0644' + owner: root:root + content: | + # Run on Reboot (with delay) to fix files created during startup + @reboot root /usr/local/bin/fix-cni-perms.sh boot + # Run once daily at 00:00 to correct any drift + 0 0 * * * root /usr/local/bin/fix-cni-perms.sh + + # ---------------------------------------------------------------- + # 2. RKE2 Admission Config + # ---------------------------------------------------------------- + - path: /etc/rancher/rke2/rke2-admission.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1beta1 + kind: PodSecurityConfiguration + defaults: + enforce: "restricted" + enforce-version: "latest" + audit: "restricted" + audit-version: "latest" + warn: "restricted" + warn-version: "latest" + exemptions: + usernames: [] + runtimeClasses: [] + namespaces: [compliance-operator-system,kube-system, cis-operator-system, tigera-operator, calico-system, rke2-ingress-nginx, cattle-system, cattle-fleet-system, longhorn-system, cattle-neuvector-system] + - name: EventRateLimit + configuration: + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - type: Server + qps: 5000 + burst: 20000 + + # ---------------------------------------------------------------- + # 3. 
RKE2 Audit Policy + # ---------------------------------------------------------------- + - path: /etc/rancher/rke2/audit-policy.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: None + users: ["system:kube-controller-manager", "system:kube-scheduler", "system:serviceaccount:kube-system:endpoint-controller"] + verbs: ["get", "update"] + resources: + - group: "" + resources: ["endpoints", "services", "services/status"] + - level: None + verbs: ["get"] + resources: + - group: "" + resources: ["nodes", "nodes/status", "pods", "pods/status"] + - level: None + users: ["kube-proxy"] + verbs: ["watch"] + resources: + - group: "" + resources: ["endpoints", "services", "services/status", "configmaps"] + - level: Metadata + resources: + - group: "" + resources: ["secrets", "configmaps"] + - level: RequestResponse + omitStages: + - RequestReceived + + # ---------------------------------------------------------------- + # 4. Static NetworkPolicies + # ---------------------------------------------------------------- + - path: /var/lib/rancher/rke2/server/manifests/cis-network-policy.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-deny-ingress + namespace: default + spec: + podSelector: {} + policyTypes: + - Ingress + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-all-metrics + namespace: kube-public + spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-all-system + namespace: kube-system + spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress + + # ---------------------------------------------------------------- + # 5. 
Service Account Hardening + # ---------------------------------------------------------------- + - path: /var/lib/rancher/rke2/server/manifests/cis-sa-config.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + namespace: default + automountServiceAccountToken: false + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + namespace: kube-system + automountServiceAccountToken: false + + - path: /var/lib/rancher/rke2/server/manifests/cis-sa-cron.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: {name: sa-cleaner, namespace: kube-system} + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: {name: sa-cleaner-role} + rules: + - apiGroups: [""] + resources: ["namespaces", "serviceaccounts"] + verbs: ["get", "list", "patch"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: {name: sa-cleaner-binding} + subjects: [{kind: ServiceAccount, name: sa-cleaner, namespace: kube-system}] + roleRef: {kind: ClusterRole, name: sa-cleaner-role, apiGroup: rbac.authorization.k8s.io} + --- + apiVersion: batch/v1 + kind: CronJob + metadata: + name: sa-cleaner + namespace: kube-system + spec: + schedule: "0 */6 * * *" # Run every 6 hours + jobTemplate: + spec: + template: + spec: + serviceAccountName: sa-cleaner + containers: + - name: cleaner + image: rancher/kubectl:v1.26.0 + command: + - /bin/bash + - -c + - | + # Get all namespaces + for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do + # Check if default SA has automount=true (or null) + automount=$(kubectl get sa default -n $ns -o jsonpath='{.automountServiceAccountToken}') + if [ "$automount" != "false" ]; then + echo "Securing default SA in namespace: $ns" + kubectl patch sa default -n $ns -p '{"automountServiceAccountToken": false}' + fi + done + restartPolicy: OnFailure + + # ---------------------------------------------------------------- + # 6. OS Sysctls Hardening + # ---------------------------------------------------------------- + - path: /etc/sysctl.d/60-rke2-cis.conf + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.max_map_count=65530 + vm.panic_on_oom=0 + fs.inotify.max_user_watches=1048576 + fs.inotify.max_user_instances=8192 + kernel.panic=10 + kernel.panic_on_oops=1 + net.ipv4.conf.all.rp_filter=1 + net.ipv4.conf.default.rp_filter=1 + net.ipv4.conf.all.accept_source_route=0 + net.ipv4.conf.default.accept_source_route=0 + net.ipv4.conf.all.accept_redirects=0 + net.ipv4.conf.default.accept_redirects=0 + net.ipv4.conf.all.send_redirects=0 + net.ipv4.conf.default.send_redirects=0 + net.ipv4.conf.all.log_martians=1 + net.ipv4.conf.default.log_martians=1 + net.ipv4.icmp_echo_ignore_broadcasts=1 + net.ipv4.icmp_ignore_bogus_error_responses=1 + net.ipv6.conf.all.disable_ipv6=1 + net.ipv6.conf.default.disable_ipv6=1 + fs.protected_hardlinks=1 + fs.protected_symlinks=1 + + # ---------------------------------------------------------------- + # 7. 
Environment & Setup Scripts + # ---------------------------------------------------------------- + - path: /etc/profile.d/rke2.sh + permissions: '0644' + content: | + export PATH=$PATH:/var/lib/rancher/rke2/bin:/opt/rke2/bin + export KUBECONFIG=/etc/rancher/rke2/rke2.yaml + + + - path: /root/updates.sh + permissions: '0550' + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y + + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3. + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + + - name: etcd + gecos: "etcd user" + shell: /sbin/nologin + system: true + lock_passwd: true + + disable_root: true + ssh_pwauth: true + + runcmd: + - systemctl enable --now qemu-guest-agent + - sysctl --system + - /root/updates.sh + # Immediate run of fix script + - /usr/local/bin/fix-cni-perms.sh + + final_message: | + VI_CNV_CLOUD_INIT has been applied successfully. + Cluster ready for Rancher! + + - name: worker-nodes + displayName: wk-nodes + quantity: 2 + etcd: false + controlplane: false + worker: true + paused: false + cpuCount: 2 + diskSize: 40 + imageName: vanderlande/image-qmx5q + memorySize: 8 + networkName: vanderlande/vm-lan + sshUser: rancher + vmNamespace: vanderlande + userData: *userData + +addons: + monitoring: + enabled: false + logging: + enabled: false + longhorn: + enabled: false + neuvector: + enabled: false \ No newline at end of file diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/fleet.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/fleet.yaml new file mode 100644 index 0000000..6a74a12 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/fleet.yaml @@ -0,0 +1,13 @@ +# The namespace on the Management Cluster where the "Cluster" CRD will be created. +# You specified 'fleet-local', which is valid for admin-level operations, +# though 'fleet-default' is also common for downstream clusters. 
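+#
+# To preview what Fleet will render from this chart before committing (a local
+# sketch, assuming Helm v3 is installed and the command is run from this directory):
+#
+#   helm template tpinf-1345-test-01 ./charts/cluster-templates -f values.yaml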
+namespace: fleet-local + +# Reference the external rancher-federal helm chart +helm: + chart: ./charts/cluster-templates + # Replace with the specific version you wish to pin (highly recommended) + releaseName: tpinf-1345-test-01 + version: 0.7.2 + valuesFiles: + - values.yaml diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/gitrepo.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/gitrepo.yaml new file mode 100644 index 0000000..857fa71 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/gitrepo.yaml @@ -0,0 +1,21 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + labels: + name: tpinf-1345 + namespace: fleet-local + resourceVersion: '27617825' +spec: + branch: TPINF-1345-inv-cis-hardening + clientSecretName: auth-qvn5p + correctDrift: + enabled: true + pollingInterval: 1m0s + repo: https://devstash.vanderlande.com/scm/ittp/as-vi-cnv.git + targets: + - clusterSelector: + matchExpressions: + - key: provider.cattle.io + operator: NotIn + values: + - harvester \ No newline at end of file diff --git a/deploy/iac-cnv/clusters/tpinf-1345-test-01/values.yaml b/deploy/iac-cnv/clusters/tpinf-1345-test-01/values.yaml new file mode 100644 index 0000000..b994c75 --- /dev/null +++ b/deploy/iac-cnv/clusters/tpinf-1345-test-01/values.yaml @@ -0,0 +1,365 @@ + + +# cluster values +cluster: + + name: tpinf-1345-test-01 + # labels: + # key: value + config: + kubernetesVersion: v1.33.5+rke2r1 + +# node and nodepool(s) values +nodepools: + - name: control-plane-nodes + displayName: cp-nodes + quantity: 1 + etcd: true + controlplane: true + worker: false + paused: false + cpuCount: 4 + diskSize: 40 + imageName: vanderlande/image-qhtpc + memorySize: 8 + networkName: vanderlande/vm-lan + sshUser: rancher + vmNamespace: vanderlande + + # --------------------------------------------------------- + # Cloud-Init: Creates the Security Files + # --------------------------------------------------------- + userData: &userData | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - qemu-guest-agent + - yq + - jq + - curl + - wget + + bootcmd: + - sysctl -w net.ipv6.conf.all.disable_ipv6=1 + - sysctl -w net.ipv6.conf.default.disable_ipv6=1 + + write_files: + # ---------------------------------------------------------------- + # 1. CNI Permission Fix Script & Cron (CIS 1.1.9 Persistence) + # ---------------------------------------------------------------- + - path: /usr/local/bin/fix-cni-perms.sh + permissions: '0700' + owner: root:root + content: | + #!/bin/bash + # Wait 60s on boot for RKE2 to write files + [ "$1" == "boot" ] && sleep 60 + + # Enforce 600 on CNI files (CIS 1.1.9) + if [ -d /etc/cni/net.d ]; then + find /etc/cni/net.d -type f -exec chmod 600 {} \; + fi + if [ -d /var/lib/cni/networks ]; then + find /var/lib/cni/networks -type f -exec chmod 600 {} \; + fi + + # Every RKE2 service restart can reset CNI file permissions, so we run + # this script on reboot and daily via cron to maintain CIS compliance. + + - path: /etc/cron.d/cis-cni-fix + permissions: '0644' + owner: root:root + content: | + # Run on Reboot (with delay) to fix files created during startup + @reboot root /usr/local/bin/fix-cni-perms.sh boot + # Run once daily at 00:00 to correct any drift + 0 0 * * * root /usr/local/bin/fix-cni-perms.sh + + # ---------------------------------------------------------------- + # 2. 
RKE2 Admission Config + # ---------------------------------------------------------------- + - path: /etc/rancher/rke2/rke2-admission.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1beta1 + kind: PodSecurityConfiguration + defaults: + enforce: "restricted" + enforce-version: "latest" + audit: "restricted" + audit-version: "latest" + warn: "restricted" + warn-version: "latest" + exemptions: + usernames: [] + runtimeClasses: [] + namespaces: [compliance-operator-system,kube-system, cis-operator-system, tigera-operator, calico-system, rke2-ingress-nginx, cattle-system, cattle-fleet-system, longhorn-system, cattle-neuvector-system] + - name: EventRateLimit + configuration: + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - type: Server + qps: 5000 + burst: 20000 + + # ---------------------------------------------------------------- + # 3. RKE2 Audit Policy + # ---------------------------------------------------------------- + - path: /etc/rancher/rke2/audit-policy.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: None + users: ["system:kube-controller-manager", "system:kube-scheduler", "system:serviceaccount:kube-system:endpoint-controller"] + verbs: ["get", "update"] + resources: + - group: "" + resources: ["endpoints", "services", "services/status"] + - level: None + verbs: ["get"] + resources: + - group: "" + resources: ["nodes", "nodes/status", "pods", "pods/status"] + - level: None + users: ["kube-proxy"] + verbs: ["watch"] + resources: + - group: "" + resources: ["endpoints", "services", "services/status", "configmaps"] + - level: Metadata + resources: + - group: "" + resources: ["secrets", "configmaps"] + - level: RequestResponse + omitStages: + - RequestReceived + + # ---------------------------------------------------------------- + # 4. Static NetworkPolicies + # ---------------------------------------------------------------- + - path: /var/lib/rancher/rke2/server/manifests/cis-network-policy.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-deny-ingress + namespace: default + spec: + podSelector: {} + policyTypes: + - Ingress + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-all-metrics + namespace: kube-public + spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-all-system + namespace: kube-system + spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress + + # ---------------------------------------------------------------- + # 5. 
Service Account Hardening + # ---------------------------------------------------------------- + - path: /var/lib/rancher/rke2/server/manifests/cis-sa-config.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + namespace: default + automountServiceAccountToken: false + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + namespace: kube-system + automountServiceAccountToken: false + + - path: /var/lib/rancher/rke2/server/manifests/cis-sa-cron.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: {name: sa-cleaner, namespace: kube-system} + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: {name: sa-cleaner-role} + rules: + - apiGroups: [""] + resources: ["namespaces", "serviceaccounts"] + verbs: ["get", "list", "patch"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: {name: sa-cleaner-binding} + subjects: [{kind: ServiceAccount, name: sa-cleaner, namespace: kube-system}] + roleRef: {kind: ClusterRole, name: sa-cleaner-role, apiGroup: rbac.authorization.k8s.io} + --- + apiVersion: batch/v1 + kind: CronJob + metadata: + name: sa-cleaner + namespace: kube-system + spec: + schedule: "0 */6 * * *" # Run every 6 hours + jobTemplate: + spec: + template: + spec: + serviceAccountName: sa-cleaner + containers: + - name: cleaner + image: rancher/kubectl:v1.26.0 + command: + - /bin/bash + - -c + - | + # Get all namespaces + for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do + # Check if default SA has automount=true (or null) + automount=$(kubectl get sa default -n $ns -o jsonpath='{.automountServiceAccountToken}') + if [ "$automount" != "false" ]; then + echo "Securing default SA in namespace: $ns" + kubectl patch sa default -n $ns -p '{"automountServiceAccountToken": false}' + fi + done + restartPolicy: OnFailure + + # ---------------------------------------------------------------- + # 6. OS Sysctls Hardening + # ---------------------------------------------------------------- + - path: /etc/sysctl.d/60-rke2-cis.conf + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.max_map_count=65530 + vm.panic_on_oom=0 + fs.inotify.max_user_watches=1048576 + fs.inotify.max_user_instances=8192 + kernel.panic=10 + kernel.panic_on_oops=1 + net.ipv4.conf.all.rp_filter=1 + net.ipv4.conf.default.rp_filter=1 + net.ipv4.conf.all.accept_source_route=0 + net.ipv4.conf.default.accept_source_route=0 + net.ipv4.conf.all.accept_redirects=0 + net.ipv4.conf.default.accept_redirects=0 + net.ipv4.conf.all.send_redirects=0 + net.ipv4.conf.default.send_redirects=0 + net.ipv4.conf.all.log_martians=1 + net.ipv4.conf.default.log_martians=1 + net.ipv4.icmp_echo_ignore_broadcasts=1 + net.ipv4.icmp_ignore_bogus_error_responses=1 + net.ipv6.conf.all.disable_ipv6=1 + net.ipv6.conf.default.disable_ipv6=1 + fs.protected_hardlinks=1 + fs.protected_symlinks=1 + + # ---------------------------------------------------------------- + # 7. 
Environment & Setup Scripts + # ---------------------------------------------------------------- + - path: /etc/profile.d/rke2.sh + permissions: '0644' + content: | + export PATH=$PATH:/var/lib/rancher/rke2/bin:/opt/rke2/bin + export KUBECONFIG=/etc/rancher/rke2/rke2.yaml + + + - path: /root/updates.sh + permissions: '0550' + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y + + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3. + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + + - name: etcd + gecos: "etcd user" + shell: /sbin/nologin + system: true + lock_passwd: true + + disable_root: true + ssh_pwauth: true + + runcmd: + - systemctl enable --now qemu-guest-agent + - sysctl --system + - /root/updates.sh + # Immediate run of fix script + - /usr/local/bin/fix-cni-perms.sh + + final_message: | + VI_CNV_CLOUD_INIT has been applied successfully. + Cluster ready for Rancher! + + - name: worker-nodes + displayName: wk-nodes + quantity: 2 + etcd: false + controlplane: false + worker: true + paused: false + cpuCount: 2 + diskSize: 40 + imageName: vanderlande/image-qmx5q + memorySize: 8 + networkName: vanderlande/vm-lan + sshUser: rancher + vmNamespace: vanderlande + userData: *userData + diff --git a/deploy/k8s-provisioner/.devcontainer/devcontainer.json b/deploy/k8s-provisioner/.devcontainer/devcontainer.json new file mode 100644 index 0000000..a3ab754 --- /dev/null +++ b/deploy/k8s-provisioner/.devcontainer/devcontainer.json @@ -0,0 +1,25 @@ +{ + "name": "Kubebuilder DevContainer", + "image": "golang:1.24", + "features": { + "ghcr.io/devcontainers/features/docker-in-docker:2": {}, + "ghcr.io/devcontainers/features/git:1": {} + }, + + "runArgs": ["--network=host"], + + "customizations": { + "vscode": { + "settings": { + "terminal.integrated.shell.linux": "/bin/bash" + }, + "extensions": [ + "ms-kubernetes-tools.vscode-kubernetes-tools", + "ms-azuretools.vscode-docker" + ] + } + }, + + "onCreateCommand": "bash .devcontainer/post-install.sh" +} + diff --git a/deploy/k8s-provisioner/.devcontainer/post-install.sh b/deploy/k8s-provisioner/.devcontainer/post-install.sh new file mode 100644 index 0000000..67f3e97 --- /dev/null +++ b/deploy/k8s-provisioner/.devcontainer/post-install.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -x + +curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH) +chmod +x ./kind +mv ./kind /usr/local/bin/kind + +curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/$(go env GOARCH) +chmod +x kubebuilder +mv kubebuilder /usr/local/bin/ + +KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt) +curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/$(go env GOARCH)/kubectl" +chmod +x kubectl +mv kubectl /usr/local/bin/kubectl + +docker network create -d=bridge --subnet=172.19.0.0/24 kind + +kind version +kubebuilder version +docker --version +go version +kubectl version --client diff --git a/deploy/k8s-provisioner/.dockerignore b/deploy/k8s-provisioner/.dockerignore new file mode 100644 index 0000000..9af8280 --- /dev/null +++ 
b/deploy/k8s-provisioner/.dockerignore @@ -0,0 +1,11 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore everything by default and re-include only needed files +** + +# Re-include Go source files (but not *_test.go) +!**/*.go +**/*_test.go + +# Re-include Go module files +!go.mod +!go.sum diff --git a/deploy/k8s-provisioner/.github/workflows/lint.yml b/deploy/k8s-provisioner/.github/workflows/lint.yml new file mode 100644 index 0000000..4838c54 --- /dev/null +++ b/deploy/k8s-provisioner/.github/workflows/lint.yml @@ -0,0 +1,23 @@ +name: Lint + +on: + push: + pull_request: + +jobs: + lint: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Run linter + uses: golangci/golangci-lint-action@v8 + with: + version: v2.5.0 diff --git a/deploy/k8s-provisioner/.github/workflows/test-e2e.yml b/deploy/k8s-provisioner/.github/workflows/test-e2e.yml new file mode 100644 index 0000000..4cdfb30 --- /dev/null +++ b/deploy/k8s-provisioner/.github/workflows/test-e2e.yml @@ -0,0 +1,32 @@ +name: E2E Tests + +on: + push: + pull_request: + +jobs: + test-e2e: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Install the latest version of kind + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH) + chmod +x ./kind + sudo mv ./kind /usr/local/bin/kind + + - name: Verify kind installation + run: kind version + + - name: Running Test e2e + run: | + go mod tidy + make test-e2e diff --git a/deploy/k8s-provisioner/.github/workflows/test.yml b/deploy/k8s-provisioner/.github/workflows/test.yml new file mode 100644 index 0000000..fc2e80d --- /dev/null +++ b/deploy/k8s-provisioner/.github/workflows/test.yml @@ -0,0 +1,23 @@ +name: Tests + +on: + push: + pull_request: + +jobs: + test: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Running Tests + run: | + go mod tidy + make test diff --git a/deploy/k8s-provisioner/.gitignore b/deploy/k8s-provisioner/.gitignore new file mode 100644 index 0000000..9f0f3a1 --- /dev/null +++ b/deploy/k8s-provisioner/.gitignore @@ -0,0 +1,30 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin/* +Dockerfile.cross + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Go workspace file +go.work + +# Kubernetes Generated files - skip generated files, except for vendored files +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +.vscode +*.swp +*.swo +*~ + +# Kubeconfig might contain secrets +*.kubeconfig diff --git a/deploy/k8s-provisioner/.golangci.yml b/deploy/k8s-provisioner/.golangci.yml new file mode 100644 index 0000000..e5b21b0 --- /dev/null +++ b/deploy/k8s-provisioner/.golangci.yml @@ -0,0 +1,52 @@ +version: "2" +run: + allow-parallel-runners: true +linters: + default: none + enable: + - copyloopvar + - dupl + - errcheck + - ginkgolinter + - goconst + - gocyclo + - govet + - ineffassign + - lll + - misspell + - nakedret + - prealloc + - revive + - staticcheck + - unconvert + - unparam + - unused + settings: + revive: + rules: + - 
name: comment-spacings + - name: import-shadowing + exclusions: + generated: lax + rules: + - linters: + - lll + path: api/* + - linters: + - dupl + - lll + path: internal/* + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/deploy/k8s-provisioner/Dockerfile b/deploy/k8s-provisioner/Dockerfile new file mode 100644 index 0000000..6466c48 --- /dev/null +++ b/deploy/k8s-provisioner/Dockerfile @@ -0,0 +1,31 @@ +# Build the manager binary +FROM golang:1.24 AS builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the Go source (relies on .dockerignore to filter) +COPY . . + +# Build +# the GOARCH has no default value to allow the binary to be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/deploy/k8s-provisioner/Makefile b/deploy/k8s-provisioner/Makefile new file mode 100644 index 0000000..ec532ca --- /dev/null +++ b/deploy/k8s-provisioner/Makefile @@ -0,0 +1,250 @@ +# Image URL to use all building/pushing image targets +IMG ?= controller:latest + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= docker + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. 
+# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + "$(CONTROLLER_GEN)" rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + "$(CONTROLLER_GEN)" object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet setup-envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell "$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'. +# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. +# CertManager is installed by default; skip with: +# - CERT_MANAGER_INSTALL_SKIP=true +KIND_CLUSTER ?= k8s-provisioner-test-e2e + +.PHONY: setup-test-e2e +setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist + @command -v $(KIND) >/dev/null 2>&1 || { \ + echo "Kind is not installed. Please install Kind manually."; \ + exit 1; \ + } + @case "$$($(KIND) get clusters)" in \ + *"$(KIND_CLUSTER)"*) \ + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ + *) \ + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ + esac + +.PHONY: test-e2e +test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. + KIND=$(KIND) KIND_CLUSTER=$(KIND_CLUSTER) go test -tags=e2e ./test/e2e/ -v -ginkgo.v + $(MAKE) cleanup-test-e2e + +.PHONY: cleanup-test-e2e +cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests + @$(KIND) delete cluster --name $(KIND_CLUSTER) + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter + "$(GOLANGCI_LINT)" run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + "$(GOLANGCI_LINT)" run --fix + +.PHONY: lint-config +lint-config: golangci-lint ## Verify golangci-lint linter configuration + "$(GOLANGCI_LINT)" config verify + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager cmd/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + $(CONTAINER_TOOL) build -t ${IMG} . 
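+
+# Example (hypothetical image reference): build the image and side-load it into
+# the e2e Kind cluster so that no registry push is required:
+#   make docker-build IMG=example.com/k8s-provisioner:dev
+#   kind load docker-image example.com/k8s-provisioner:dev --name k8s-provisioner-test-e2e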
+ +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name k8s-provisioner-builder + $(CONTAINER_TOOL) buildx use k8s-provisioner-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm k8s-provisioner-builder + rm Dockerfile.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && "$(KUSTOMIZE)" edit set image controller=${IMG} + "$(KUSTOMIZE)" build config/default > dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + @out="$$( "$(KUSTOMIZE)" build config/crd 2>/dev/null || true )"; \ + if [ -n "$$out" ]; then echo "$$out" | "$(KUBECTL)" apply -f -; else echo "No CRDs to install; skipping."; fi + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + @out="$$( "$(KUSTOMIZE)" build config/crd 2>/dev/null || true )"; \ + if [ -n "$$out" ]; then echo "$$out" | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f -; else echo "No CRDs to delete; skipping."; fi + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && "$(KUSTOMIZE)" edit set image controller=${IMG} + "$(KUSTOMIZE)" build config/default | "$(KUBECTL)" apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
+ "$(KUSTOMIZE)" build config/default | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p "$(LOCALBIN)" + +## Tool Binaries +KUBECTL ?= kubectl +KIND ?= kind +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.7.1 +CONTROLLER_TOOLS_VERSION ?= v0.19.0 + +#ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20) +ENVTEST_VERSION ?= $(shell v='$(call gomodver,sigs.k8s.io/controller-runtime)'; \ + [ -n "$$v" ] || { echo "Set ENVTEST_VERSION manually (controller-runtime replace has no tag)" >&2; exit 1; }; \ + printf '%s\n' "$$v" | sed -E 's/^v?([0-9]+)\.([0-9]+).*/release-\1.\2/') + +#ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31) +ENVTEST_K8S_VERSION ?= $(shell v='$(call gomodver,k8s.io/api)'; \ + [ -n "$$v" ] || { echo "Set ENVTEST_K8S_VERSION manually (k8s.io/api replace has no tag)" >&2; exit 1; }; \ + printf '%s\n' "$$v" | sed -E 's/^v?[0-9]+\.([0-9]+).*/1.\1/') + +GOLANGCI_LINT_VERSION ?= v2.5.0 +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: setup-envtest +setup-envtest: envtest ## Download the binaries required for ENVTEST in the local bin directory. + @echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..." + @"$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path || { \ + echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \ + exit 1; \ + } + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. 
+$(GOLANGCI_LINT): $(LOCALBIN)
+	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
+
+# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
+# $1 - target path with name of binary
+# $2 - package url which can be installed
+# $3 - specific version of package
+define go-install-tool
+@[ -f "$(1)-$(3)" ] && [ "$$(readlink -- "$(1)" 2>/dev/null)" = "$(1)-$(3)" ] || { \
+set -e; \
+package=$(2)@$(3) ;\
+echo "Downloading $${package}" ;\
+rm -f "$(1)" ;\
+GOBIN="$(LOCALBIN)" go install $${package} ;\
+mv "$(LOCALBIN)/$$(basename "$(1)")" "$(1)-$(3)" ;\
+} ;\
+ln -sf "$$(realpath "$(1)-$(3)")" "$(1)"
+endef
+
+define gomodver
+$(shell go list -m -f '{{if .Replace}}{{.Replace.Version}}{{else}}{{.Version}}{{end}}' $(1) 2>/dev/null)
+endef
diff --git a/deploy/k8s-provisioner/PROJECT b/deploy/k8s-provisioner/PROJECT
new file mode 100644
index 0000000..9bc8651
--- /dev/null
+++ b/deploy/k8s-provisioner/PROJECT
@@ -0,0 +1,29 @@
+# Code generated by tool. DO NOT EDIT.
+# This file is used to track the info used to scaffold your project
+# and allow the plugins to work properly.
+# More info: https://book.kubebuilder.io/reference/project-config.html
+cliVersion: 4.10.1
+domain: appstack.io
+layout:
+- go.kubebuilder.io/v4
+projectName: k8s-provisioner
+repo: vanderlande.com/appstack/k8s-provisioner
+resources:
+- api:
+    crdVersion: v1
+    namespaced: true
+  domain: appstack.io
+  group: k8sprovisioner
+  kind: Infra
+  path: vanderlande.com/appstack/k8s-provisioner/api/v1alpha1
+  version: v1alpha1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: appstack.io
+  group: k8sprovisioner
+  kind: Cluster
+  path: vanderlande.com/appstack/k8s-provisioner/api/v1alpha1
+  version: v1alpha1
+version: "3"
diff --git a/deploy/k8s-provisioner/README.md b/deploy/k8s-provisioner/README.md
new file mode 100644
index 0000000..1b242af
--- /dev/null
+++ b/deploy/k8s-provisioner/README.md
@@ -0,0 +1,135 @@
+# k8s-provisioner
+// TODO(user): Add simple overview of use/purpose
+
+## Description
+// TODO(user): An in-depth paragraph about your project and overview of use
+
+## Getting Started
+
+### Prerequisites
+- go version v1.24.6+
+- docker version 17.03+
+- kubectl version v1.11.3+
+- Access to a Kubernetes v1.11.3+ cluster
+
+### To Deploy on the cluster
+**Build and push your image to the location specified by `IMG`:**
+
+```sh
+make docker-build docker-push IMG=/k8s-provisioner:tag
+```
+
+**NOTE:** This image must be published to the registry you specified,
+and your working environment must have permission to pull it.
+Make sure you have the proper permissions for the registry if the above commands don't work.
+
+**Install the CRDs into the cluster:**
+
+```sh
+make install
+```
+
+**Deploy the Manager to the cluster with the image specified by `IMG`:**
+
+```sh
+make deploy IMG=/k8s-provisioner:tag
+```
+
+> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
+privileges or be logged in as admin.
+
+**Create instances of your solution**
+You can apply the samples (examples) from the config/samples directory:
+
+```sh
+kubectl apply -k config/samples/
+```
+
+> **NOTE**: Ensure that the samples have default values to test them out.
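+
+For reference, a minimal `Cluster` object matching the `v1alpha1` API types in this
+repository might look like the following sketch (the names and sizes are illustrative,
+not shipped sample defaults):
+
+```yaml
+apiVersion: k8sprovisioner.appstack.io/v1alpha1
+kind: Cluster
+metadata:
+  name: test-cluster-01
+spec:
+  infraRef: default-infra # name of an Infra object supplying environment defaults
+  kubernetesVersion: v1.33.5+rke2r1
+  controlPlaneHA: false
+  workerPools:
+    - name: wk-pool-a
+      quantity: 2
+      cpuCores: 2
+      memoryGb: 8
+      diskGb: 40
+```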
+
+### To Uninstall
+**Delete the instances (CRs) from the cluster:**
+
+```sh
+kubectl delete -k config/samples/
+```
+
+**Delete the APIs (CRDs) from the cluster:**
+
+```sh
+make uninstall
+```
+
+**Undeploy the controller from the cluster:**
+
+```sh
+make undeploy
+```
+
+## Project Distribution
+
+The following are options for releasing and providing this solution to users.
+
+### By providing a bundle with all YAML files
+
+1. Build the installer for the image built and published in the registry:
+
+```sh
+make build-installer IMG=/k8s-provisioner:tag
+```
+
+**NOTE:** The makefile target mentioned above generates an 'install.yaml'
+file in the dist directory. This file contains all the resources built
+with Kustomize, which are necessary to install this project without its
+dependencies.
+
+2. Using the installer
+
+Users can just run 'kubectl apply -f ' to install
+the project, i.e.:
+
+```sh
+kubectl apply -f https://raw.githubusercontent.com//k8s-provisioner//dist/install.yaml
+```
+
+### By providing a Helm Chart
+
+1. Build the chart using the optional helm plugin
+
+```sh
+kubebuilder edit --plugins=helm/v2-alpha
+```
+
+2. A chart will be generated under 'dist/chart', and users
+can obtain this solution from there.
+
+**NOTE:** If you change the project, you need to update the Helm Chart
+using the same command above to sync the latest changes. Furthermore,
+if you create webhooks, you need to use the above command with
+the '--force' flag and manually ensure that any custom configuration
+previously added to 'dist/chart/values.yaml' or 'dist/chart/manager/manager.yaml'
+is manually re-applied afterwards.
+
+## Contributing
+// TODO(user): Add detailed information on how you would like others to contribute to this project
+
+**NOTE:** Run `make help` for more information on all potential `make` targets
+
+More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html)
+
+## License
+
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
diff --git a/deploy/k8s-provisioner/api/v1alpha1/cluster_types.go b/deploy/k8s-provisioner/api/v1alpha1/cluster_types.go
new file mode 100644
index 0000000..fe63d0e
--- /dev/null
+++ b/deploy/k8s-provisioner/api/v1alpha1/cluster_types.go
@@ -0,0 +1,56 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type ClusterSpec struct {
+	InfraRef          string              `json:"infraRef"`
+	KubernetesVersion string              `json:"kubernetesVersion"`
+	ControlPlaneHA    bool                `json:"controlPlaneHA"`
+	WorkerPools       []WorkerPoolRequest `json:"workerPools"`
+}
+
+type WorkerPoolRequest struct {
+	Name     string `json:"name"`
+	Quantity int    `json:"quantity"`
+	CpuCores int    `json:"cpuCores"`
+	MemoryGB int    `json:"memoryGb"`
+	DiskGB   int    `json:"diskGb"`
+}
+
+// HarvesterAccountStatus tracks the Harvester identity generated for a cluster.
+type HarvesterAccountStatus struct {
+	// The ServiceAccount created on Harvester (e.g.
"prov-test-cluster-01") + ServiceAccountName string `json:"serviceAccountName,omitempty"` + // The Secret created in this namespace (e.g. "harvesterconfig-test-cluster-01") + SecretRef string `json:"secretRef,omitempty"` + // Expiry for future rotation logic + TokenExpiresAt *metav1.Time `json:"tokenExpiresAt,omitempty"` +} + +type ClusterStatus struct { + Ready bool `json:"ready"` + // +optional + GeneratedAccount *HarvesterAccountStatus `json:"generatedAccount,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ClusterSpec `json:"spec,omitempty"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/deploy/k8s-provisioner/api/v1alpha1/groupversion_info.go b/deploy/k8s-provisioner/api/v1alpha1/groupversion_info.go new file mode 100644 index 0000000..c687da4 --- /dev/null +++ b/deploy/k8s-provisioner/api/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the k8sprovisioner v1alpha1 API group. +// +kubebuilder:object:generate=true +// +groupName=k8sprovisioner.appstack.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "k8sprovisioner.appstack.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/deploy/k8s-provisioner/api/v1alpha1/infra_types.go b/deploy/k8s-provisioner/api/v1alpha1/infra_types.go new file mode 100644 index 0000000..c40f37c --- /dev/null +++ b/deploy/k8s-provisioner/api/v1alpha1/infra_types.go @@ -0,0 +1,50 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type InfraSpec struct { + // 1. Rancher/Cloud Settings + // The "Master" credential name in cattle-global-data + // +required + CloudCredentialSecret string `json:"cloudCredentialSecret"` + RancherURL string `json:"rancherUrl"` + // This removes the need for auto-discovery. + HarvesterURL string `json:"harvesterUrl"` + // 2. Environment Defaults + VmNamespace string `json:"vmNamespace"` + ImageName string `json:"imageName"` + NetworkName string `json:"networkName"` + SshUser string `json:"sshUser"` + + // 3. 
Governance Configs + // +kubebuilder:validation:Optional + RKE2ConfigYAML string `json:"rke2ConfigYaml"` + // +kubebuilder:validation:Optional + UserData string `json:"userData"` +} + +type InfraStatus struct { + Ready bool `json:"ready"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type Infra struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec InfraSpec `json:"spec,omitempty"` + Status InfraStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true +type InfraList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Infra `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Infra{}, &InfraList{}) +} diff --git a/deploy/k8s-provisioner/api/v1alpha1/zz_generated.deepcopy.go b/deploy/k8s-provisioner/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..a5ea856 --- /dev/null +++ b/deploy/k8s-provisioner/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,247 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + if in.WorkerPools != nil { + in, out := &in.WorkerPools, &out.WorkerPools + *out = make([]WorkerPoolRequest, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + if in.GeneratedAccount != nil { + in, out := &in.GeneratedAccount, &out.GeneratedAccount + *out = new(HarvesterAccountStatus) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HarvesterAccountStatus) DeepCopyInto(out *HarvesterAccountStatus) { + *out = *in + if in.TokenExpiresAt != nil { + in, out := &in.TokenExpiresAt, &out.TokenExpiresAt + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterAccountStatus. +func (in *HarvesterAccountStatus) DeepCopy() *HarvesterAccountStatus { + if in == nil { + return nil + } + out := new(HarvesterAccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Infra) DeepCopyInto(out *Infra) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infra. +func (in *Infra) DeepCopy() *Infra { + if in == nil { + return nil + } + out := new(Infra) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Infra) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraList) DeepCopyInto(out *InfraList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Infra, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraList. +func (in *InfraList) DeepCopy() *InfraList { + if in == nil { + return nil + } + out := new(InfraList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *InfraList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraSpec) DeepCopyInto(out *InfraSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraSpec. +func (in *InfraSpec) DeepCopy() *InfraSpec { + if in == nil { + return nil + } + out := new(InfraSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraStatus) DeepCopyInto(out *InfraStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraStatus. +func (in *InfraStatus) DeepCopy() *InfraStatus { + if in == nil { + return nil + } + out := new(InfraStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerPoolRequest) DeepCopyInto(out *WorkerPoolRequest) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerPoolRequest. +func (in *WorkerPoolRequest) DeepCopy() *WorkerPoolRequest { + if in == nil { + return nil + } + out := new(WorkerPoolRequest) + in.DeepCopyInto(out) + return out +} diff --git a/deploy/k8s-provisioner/cmd/main.go b/deploy/k8s-provisioner/cmd/main.go new file mode 100644 index 0000000..e15c17a --- /dev/null +++ b/deploy/k8s-provisioner/cmd/main.go @@ -0,0 +1,204 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "crypto/tls" + "flag" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/healthz"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+
+	k8sprovisionerv1alpha1 "vanderlande.com/appstack/k8s-provisioner/api/v1alpha1"
+	"vanderlande.com/appstack/k8s-provisioner/internal/controller"
+	// +kubebuilder:scaffold:imports
+)
+
+var (
+	scheme   = runtime.NewScheme()
+	setupLog = ctrl.Log.WithName("setup")
+)
+
+func init() {
+	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+
+	utilruntime.Must(k8sprovisionerv1alpha1.AddToScheme(scheme))
+	// +kubebuilder:scaffold:scheme
+}
+
+// nolint:gocyclo
+func main() {
+	var metricsAddr string
+	var metricsCertPath, metricsCertName, metricsCertKey string
+	var webhookCertPath, webhookCertName, webhookCertKey string
+	var enableLeaderElection bool
+	var probeAddr string
+	var secureMetrics bool
+	var enableHTTP2 bool
+	var tlsOpts []func(*tls.Config)
+	flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
+		"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
+	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
+	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
+		"Enable leader election for controller manager. "+
+			"Enabling this will ensure there is only one active controller manager.")
+	flag.BoolVar(&secureMetrics, "metrics-secure", true,
+		"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
+	flag.StringVar(&webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.")
+	flag.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.")
+	flag.StringVar(&webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.")
+	flag.StringVar(&metricsCertPath, "metrics-cert-path", "",
+		"The directory that contains the metrics server certificate.")
+	flag.StringVar(&metricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.")
+	flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.")
+	flag.BoolVar(&enableHTTP2, "enable-http2", false,
+		"If set, HTTP/2 will be enabled for the metrics and webhook servers")
+	opts := zap.Options{
+		Development: true,
+	}
+	opts.BindFlags(flag.CommandLine)
+	flag.Parse()
+
+	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
+
+	// if the enable-http2 flag is false (the default), http/2 should be disabled
+	// due to its vulnerabilities. More specifically, disabling http/2 will
+	// prevent the servers from being vulnerable to the HTTP/2 Stream Cancellation and
+	// Rapid Reset CVEs.
For more information see:
+	// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
+	// - https://github.com/advisories/GHSA-4374-p667-p6c8
+	disableHTTP2 := func(c *tls.Config) {
+		setupLog.Info("disabling http/2")
+		c.NextProtos = []string{"http/1.1"}
+	}
+
+	if !enableHTTP2 {
+		tlsOpts = append(tlsOpts, disableHTTP2)
+	}
+
+	// Initial webhook TLS options
+	webhookTLSOpts := tlsOpts
+	webhookServerOptions := webhook.Options{
+		TLSOpts: webhookTLSOpts,
+	}
+
+	if len(webhookCertPath) > 0 {
+		setupLog.Info("Initializing webhook certificate watcher using provided certificates",
+			"webhook-cert-path", webhookCertPath, "webhook-cert-name", webhookCertName, "webhook-cert-key", webhookCertKey)
+
+		webhookServerOptions.CertDir = webhookCertPath
+		webhookServerOptions.CertName = webhookCertName
+		webhookServerOptions.KeyName = webhookCertKey
+	}
+
+	webhookServer := webhook.NewServer(webhookServerOptions)
+
+	// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
+	// More info:
+	// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.4/pkg/metrics/server
+	// - https://book.kubebuilder.io/reference/metrics.html
+	metricsServerOptions := metricsserver.Options{
+		BindAddress:   metricsAddr,
+		SecureServing: secureMetrics,
+		TLSOpts:       tlsOpts,
+	}
+
+	if secureMetrics {
+		// FilterProvider is used to protect the metrics endpoint with authn/authz.
+		// These configurations ensure that only authorized users and service accounts
+		// can access the metrics endpoint. The RBAC rules are configured in 'config/rbac/kustomization.yaml'. More info:
+		// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.4/pkg/metrics/filters#WithAuthenticationAndAuthorization
+		metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
+	}
+
+	// If the certificate is not specified, controller-runtime will automatically
+	// generate self-signed certificates for the metrics server. While convenient for development and testing,
+	// this setup is not recommended for production.
+	//
+	// TODO(user): If you enable certManager, uncomment the following lines:
+	// - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates
+	// managed by cert-manager for the metrics server.
+	// - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification.
+	if len(metricsCertPath) > 0 {
+		setupLog.Info("Initializing metrics certificate watcher using provided certificates",
+			"metrics-cert-path", metricsCertPath, "metrics-cert-name", metricsCertName, "metrics-cert-key", metricsCertKey)
+
+		metricsServerOptions.CertDir = metricsCertPath
+		metricsServerOptions.CertName = metricsCertName
+		metricsServerOptions.KeyName = metricsCertKey
+	}
+
+	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
+		Scheme:                 scheme,
+		Metrics:                metricsServerOptions,
+		WebhookServer:          webhookServer,
+		HealthProbeBindAddress: probeAddr,
+		LeaderElection:         enableLeaderElection,
+		LeaderElectionID:       "8a5a6d0a.appstack.io",
+		// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
+		// when the Manager ends. This requires the binary to immediately end when the
+		// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
+		// speeds up voluntary leader transitions as the new leader doesn't have to wait
+		// the LeaseDuration time first.
+		//
+		// In the default scaffold provided, the program ends immediately after
+		// the manager stops, so it would be fine to enable this option.
However,
+		// if you are doing, or intend to do, any operations such as performing cleanups
+		// after the manager stops, then its usage might be unsafe.
+		// LeaderElectionReleaseOnCancel: true,
+	})
+	if err != nil {
+		setupLog.Error(err, "unable to start manager")
+		os.Exit(1)
+	}
+
+	if err := (&controller.ClusterReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "Cluster")
+		os.Exit(1)
+	}
+	// +kubebuilder:scaffold:builder
+
+	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to set up health check")
+		os.Exit(1)
+	}
+	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to set up ready check")
+		os.Exit(1)
+	}
+
+	setupLog.Info("starting manager")
+	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		setupLog.Error(err, "problem running manager")
+		os.Exit(1)
+	}
+}
diff --git a/deploy/k8s-provisioner/config/crd/bases/k8sprovisioner.appstack.io_clusters.yaml b/deploy/k8s-provisioner/config/crd/bases/k8sprovisioner.appstack.io_clusters.yaml
new file mode 100644
index 0000000..376feda
--- /dev/null
+++ b/deploy/k8s-provisioner/config/crd/bases/k8sprovisioner.appstack.io_clusters.yaml
@@ -0,0 +1,98 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.19.0
+  name: clusters.k8sprovisioner.appstack.io
+spec:
+  group: k8sprovisioner.appstack.io
+  names:
+    kind: Cluster
+    listKind: ClusterList
+    plural: clusters
+    singular: cluster
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            properties:
+              controlPlaneHA:
+                type: boolean
+              infraRef:
+                type: string
+              kubernetesVersion:
+                type: string
+              workerPools:
+                items:
+                  properties:
+                    cpuCores:
+                      type: integer
+                    diskGb:
+                      type: integer
+                    memoryGb:
+                      type: integer
+                    name:
+                      type: string
+                    quantity:
+                      type: integer
+                  required:
+                  - cpuCores
+                  - diskGb
+                  - memoryGb
+                  - name
+                  - quantity
+                  type: object
+                type: array
+            required:
+            - controlPlaneHA
+            - infraRef
+            - kubernetesVersion
+            - workerPools
+            type: object
+          status:
+            properties:
+              generatedAccount:
+                description: HarvesterAccountStatus tracks the Harvester identity created for a provisioned cluster.
+                properties:
+                  secretRef:
+                    description: The Secret created in this namespace (e.g. "harvesterconfig-test-cluster-01")
+                    type: string
+                  serviceAccountName:
+                    description: The ServiceAccount created on Harvester (e.g.
"prov-test-cluster-01") + type: string + tokenExpiresAt: + description: Expiry for future rotation logic + format: date-time + type: string + type: object + ready: + type: boolean + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/k8s-provisioner/config/crd/bases/k8sprovisioner.appstack.io_infras.yaml b/deploy/k8s-provisioner/config/crd/bases/k8sprovisioner.appstack.io_infras.yaml new file mode 100644 index 0000000..2812b73 --- /dev/null +++ b/deploy/k8s-provisioner/config/crd/bases/k8sprovisioner.appstack.io_infras.yaml @@ -0,0 +1,84 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: infras.k8sprovisioner.appstack.io +spec: + group: k8sprovisioner.appstack.io + names: + kind: Infra + listKind: InfraList + plural: infras + singular: infra + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + cloudCredentialSecret: + description: |- + 1. Rancher/Cloud Settings + The "Master" credential name in cattle-global-data + type: string + harvesterUrl: + description: This removes the need for auto-discovery. + type: string + imageName: + type: string + networkName: + type: string + rancherUrl: + type: string + rke2ConfigYaml: + description: 3. Governance Configs + type: string + sshUser: + type: string + userData: + type: string + vmNamespace: + description: 2. Environment Defaults + type: string + required: + - cloudCredentialSecret + - harvesterUrl + - imageName + - networkName + - rancherUrl + - sshUser + - vmNamespace + type: object + status: + properties: + ready: + type: boolean + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/k8s-provisioner/config/crd/kustomization.yaml b/deploy/k8s-provisioner/config/crd/kustomization.yaml new file mode 100644 index 0000000..c682f6d --- /dev/null +++ b/deploy/k8s-provisioner/config/crd/kustomization.yaml @@ -0,0 +1,17 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/k8sprovisioner.appstack.io_infras.yaml +- bases/k8sprovisioner.appstack.io_clusters.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+#configurations: +#- kustomizeconfig.yaml diff --git a/deploy/k8s-provisioner/config/crd/kustomizeconfig.yaml b/deploy/k8s-provisioner/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000..ec5c150 --- /dev/null +++ b/deploy/k8s-provisioner/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/deploy/k8s-provisioner/config/default/cert_metrics_manager_patch.yaml b/deploy/k8s-provisioner/config/default/cert_metrics_manager_patch.yaml new file mode 100644 index 0000000..d975015 --- /dev/null +++ b/deploy/k8s-provisioner/config/default/cert_metrics_manager_patch.yaml @@ -0,0 +1,30 @@ +# This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs. + +# Add the volumeMount for the metrics-server certs +- op: add + path: /spec/template/spec/containers/0/volumeMounts/- + value: + mountPath: /tmp/k8s-metrics-server/metrics-certs + name: metrics-certs + readOnly: true + +# Add the --metrics-cert-path argument for the metrics server +- op: add + path: /spec/template/spec/containers/0/args/- + value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs + +# Add the metrics-server certs volume configuration +- op: add + path: /spec/template/spec/volumes/- + value: + name: metrics-certs + secret: + secretName: metrics-server-cert + optional: false + items: + - key: ca.crt + path: ca.crt + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key diff --git a/deploy/k8s-provisioner/config/default/kustomization.yaml b/deploy/k8s-provisioner/config/default/kustomization.yaml new file mode 100644 index 0000000..7c265f7 --- /dev/null +++ b/deploy/k8s-provisioner/config/default/kustomization.yaml @@ -0,0 +1,234 @@ +# Adds namespace to all resources. +namespace: k8s-provisioner-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: k8s-provisioner- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. +# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. +# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. 
+#- ../network-policy + +# Uncomment the patches line if you enable Metrics +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# Uncomment the patches line if you enable Metrics and CertManager +# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. +# This patch will protect the metrics with certManager self-signed certs. +#- path: cert_metrics_manager_patch.yaml +# target: +# kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- path: manager_webhook_patch.yaml +# target: +# kind: Deployment + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Uncomment the following block to enable certificates for metrics +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.name +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 0 +# create: true + +# - source: +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.namespace +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true +# - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have any webhook +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # Name of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # Namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. +# +kubebuilder:scaffold:crdkustomizecainjectionns +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 
+# +kubebuilder:scaffold:crdkustomizecainjectionname diff --git a/deploy/k8s-provisioner/config/default/manager_metrics_patch.yaml b/deploy/k8s-provisioner/config/default/manager_metrics_patch.yaml new file mode 100644 index 0000000..2aaef65 --- /dev/null +++ b/deploy/k8s-provisioner/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/deploy/k8s-provisioner/config/default/metrics_service.yaml b/deploy/k8s-provisioner/config/default/metrics_service.yaml new file mode 100644 index 0000000..ab1b361 --- /dev/null +++ b/deploy/k8s-provisioner/config/default/metrics_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner diff --git a/deploy/k8s-provisioner/config/manager/kustomization.yaml b/deploy/k8s-provisioner/config/manager/kustomization.yaml new file mode 100644 index 0000000..5c5f0b8 --- /dev/null +++ b/deploy/k8s-provisioner/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/deploy/k8s-provisioner/config/manager/manager.yaml b/deploy/k8s-provisioner/config/manager/manager.yaml new file mode 100644 index 0000000..6434f87 --- /dev/null +++ b/deploy/k8s-provisioner/config/manager/manager.yaml @@ -0,0 +1,99 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + # Projects are configured by default to adhere to the "restricted" Pod Security Standards. + # This ensures that deployments meet the highest security requirements for Kubernetes. 
+ # For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 + image: controller:latest + name: manager + ports: [] + securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + volumeMounts: [] + volumes: [] + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/deploy/k8s-provisioner/config/network-policy/allow-metrics-traffic.yaml b/deploy/k8s-provisioner/config/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 0000000..727a884 --- /dev/null +++ b/deploy/k8s-provisioner/config/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,27 @@ +# This NetworkPolicy allows ingress traffic +# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those +# namespaces are able to gather data from the metrics endpoint. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: allow-metrics-traffic + namespace: system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP diff --git a/deploy/k8s-provisioner/config/network-policy/kustomization.yaml b/deploy/k8s-provisioner/config/network-policy/kustomization.yaml new file mode 100644 index 0000000..ec0fb5e --- /dev/null +++ b/deploy/k8s-provisioner/config/network-policy/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- allow-metrics-traffic.yaml diff --git a/deploy/k8s-provisioner/config/prometheus/kustomization.yaml b/deploy/k8s-provisioner/config/prometheus/kustomization.yaml new file mode 100644 index 0000000..fdc5481 --- /dev/null +++ b/deploy/k8s-provisioner/config/prometheus/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- monitor.yaml + +# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus +# to securely reference certificates created and managed by cert-manager. +# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml +# to mount the "metrics-server-cert" secret in the Manager Deployment. 
+#patches: +# - path: monitor_tls_patch.yaml +# target: +# kind: ServiceMonitor diff --git a/deploy/k8s-provisioner/config/prometheus/monitor.yaml b/deploy/k8s-provisioner/config/prometheus/monitor.yaml new file mode 100644 index 0000000..3dc1527 --- /dev/null +++ b/deploy/k8s-provisioner/config/prometheus/monitor.yaml @@ -0,0 +1,27 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification, exposing the system to potential man-in-the-middle attacks. + # For production environments, it is recommended to use cert-manager for automatic TLS certificate management. + # To apply this configuration, enable cert-manager and use the patch located at config/prometheus/servicemonitor_tls_patch.yaml, + # which securely references the certificate from the 'metrics-server-cert' secret. + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: k8s-provisioner diff --git a/deploy/k8s-provisioner/config/prometheus/monitor_tls_patch.yaml b/deploy/k8s-provisioner/config/prometheus/monitor_tls_patch.yaml new file mode 100644 index 0000000..5bf84ce --- /dev/null +++ b/deploy/k8s-provisioner/config/prometheus/monitor_tls_patch.yaml @@ -0,0 +1,19 @@ +# Patch for Prometheus ServiceMonitor to enable secure TLS configuration +# using certificates managed by cert-manager +- op: replace + path: /spec/endpoints/0/tlsConfig + value: + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize + serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc + insecureSkipVerify: false + ca: + secret: + name: metrics-server-cert + key: ca.crt + cert: + secret: + name: metrics-server-cert + key: tls.crt + keySecret: + name: metrics-server-cert + key: tls.key diff --git a/deploy/k8s-provisioner/config/rbac/cluster_admin_role.yaml b/deploy/k8s-provisioner/config/rbac/cluster_admin_role.yaml new file mode 100644 index 0000000..7e4f769 --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/cluster_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project k8s-provisioner itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over k8sprovisioner.appstack.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. 
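+#
+# As a hypothetical illustration (the group name "platform-admins" is a
+# placeholder; note that the namePrefix in config/default installs this role
+# as "k8s-provisioner-cluster-admin-role"), such a delegation could look like:
+#
+#   apiVersion: rbac.authorization.k8s.io/v1
+#   kind: ClusterRoleBinding
+#   metadata:
+#     name: k8s-provisioner-cluster-admin-binding
+#   roleRef:
+#     apiGroup: rbac.authorization.k8s.io
+#     kind: ClusterRole
+#     name: k8s-provisioner-cluster-admin-role
+#   subjects:
+#   - kind: Group
+#     apiGroup: rbac.authorization.k8s.io
+#     name: platform-admins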
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: cluster-admin-role +rules: +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - clusters + verbs: + - '*' +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - clusters/status + verbs: + - get diff --git a/deploy/k8s-provisioner/config/rbac/cluster_editor_role.yaml b/deploy/k8s-provisioner/config/rbac/cluster_editor_role.yaml new file mode 100644 index 0000000..a16b345 --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/cluster_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project k8s-provisioner itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the k8sprovisioner.appstack.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: cluster-editor-role +rules: +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - clusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - clusters/status + verbs: + - get diff --git a/deploy/k8s-provisioner/config/rbac/cluster_viewer_role.yaml b/deploy/k8s-provisioner/config/rbac/cluster_viewer_role.yaml new file mode 100644 index 0000000..b0b5c99 --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/cluster_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project k8s-provisioner itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to k8sprovisioner.appstack.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: cluster-viewer-role +rules: +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - clusters + verbs: + - get + - list + - watch +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - clusters/status + verbs: + - get diff --git a/deploy/k8s-provisioner/config/rbac/infra_admin_role.yaml b/deploy/k8s-provisioner/config/rbac/infra_admin_role.yaml new file mode 100644 index 0000000..d6a6abf --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/infra_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project k8s-provisioner itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over k8sprovisioner.appstack.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: infra-admin-role +rules: +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - infras + verbs: + - '*' +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - infras/status + verbs: + - get diff --git a/deploy/k8s-provisioner/config/rbac/infra_editor_role.yaml b/deploy/k8s-provisioner/config/rbac/infra_editor_role.yaml new file mode 100644 index 0000000..fe05dcf --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/infra_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project k8s-provisioner itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the k8sprovisioner.appstack.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: infra-editor-role +rules: +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - infras + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - infras/status + verbs: + - get diff --git a/deploy/k8s-provisioner/config/rbac/infra_viewer_role.yaml b/deploy/k8s-provisioner/config/rbac/infra_viewer_role.yaml new file mode 100644 index 0000000..35205ae --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/infra_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project k8s-provisioner itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to k8sprovisioner.appstack.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: infra-viewer-role +rules: +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - infras + verbs: + - get + - list + - watch +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - infras/status + verbs: + - get diff --git a/deploy/k8s-provisioner/config/rbac/kustomization.yaml b/deploy/k8s-provisioner/config/rbac/kustomization.yaml new file mode 100644 index 0000000..042a61b --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/kustomization.yaml @@ -0,0 +1,31 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. 
+# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml +# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the k8s-provisioner itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. +- cluster_admin_role.yaml +- cluster_editor_role.yaml +- cluster_viewer_role.yaml +- infra_admin_role.yaml +- infra_editor_role.yaml +- infra_viewer_role.yaml + diff --git a/deploy/k8s-provisioner/config/rbac/leader_election_role.yaml b/deploy/k8s-provisioner/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..1ca283c --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/deploy/k8s-provisioner/config/rbac/leader_election_role_binding.yaml b/deploy/k8s-provisioner/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..f63b997 --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/deploy/k8s-provisioner/config/rbac/metrics_auth_role.yaml b/deploy/k8s-provisioner/config/rbac/metrics_auth_role.yaml new file mode 100644 index 0000000..32d2e4e --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/deploy/k8s-provisioner/config/rbac/metrics_auth_role_binding.yaml b/deploy/k8s-provisioner/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 0000000..e775d67 --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/deploy/k8s-provisioner/config/rbac/metrics_reader_role.yaml b/deploy/k8s-provisioner/config/rbac/metrics_reader_role.yaml new file mode 100644 index 0000000..51a75db --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/deploy/k8s-provisioner/config/rbac/role.yaml b/deploy/k8s-provisioner/config/rbac/role.yaml new file mode 100644 index 0000000..f01874d --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/role.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - clusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - clusters/status + verbs: + - get + - patch + - update +- apiGroups: + - k8sprovisioner.appstack.io + resources: + - infras + verbs: + - get + - list + - watch diff --git a/deploy/k8s-provisioner/config/rbac/role_binding.yaml b/deploy/k8s-provisioner/config/rbac/role_binding.yaml new file mode 100644 index 0000000..ecac253 --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/deploy/k8s-provisioner/config/rbac/service_account.yaml b/deploy/k8s-provisioner/config/rbac/service_account.yaml new file mode 100644 index 0000000..9348f71 --- /dev/null +++ b/deploy/k8s-provisioner/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/deploy/k8s-provisioner/config/samples/k8sprovisioner_v1alpha1_cluster.yaml b/deploy/k8s-provisioner/config/samples/k8sprovisioner_v1alpha1_cluster.yaml new file mode 100644 index 0000000..59d6599 --- /dev/null +++ b/deploy/k8s-provisioner/config/samples/k8sprovisioner_v1alpha1_cluster.yaml @@ -0,0 +1,9 @@ +apiVersion: k8sprovisioner.appstack.io/v1alpha1 +kind: Cluster +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: cluster-sample +spec: + # TODO(user): Add fields here diff --git a/deploy/k8s-provisioner/config/samples/k8sprovisioner_v1alpha1_infra.yaml b/deploy/k8s-provisioner/config/samples/k8sprovisioner_v1alpha1_infra.yaml new file mode 100644 index 0000000..c24c692 --- /dev/null +++ b/deploy/k8s-provisioner/config/samples/k8sprovisioner_v1alpha1_infra.yaml @@ -0,0 +1,9 @@ +apiVersion: k8sprovisioner.appstack.io/v1alpha1 +kind: Infra +metadata: + labels: + app.kubernetes.io/name: k8s-provisioner + app.kubernetes.io/managed-by: kustomize + name: infra-sample +spec: + # TODO(user): Add fields here diff --git a/deploy/k8s-provisioner/config/samples/kustomization.yaml b/deploy/k8s-provisioner/config/samples/kustomization.yaml new file mode 100644 index 0000000..7a76081 --- /dev/null +++ b/deploy/k8s-provisioner/config/samples/kustomization.yaml @@ -0,0 +1,5 @@ +## Append samples of your project ## +resources: +- k8sprovisioner_v1alpha1_infra.yaml +- k8sprovisioner_v1alpha1_cluster.yaml 
+# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/deploy/k8s-provisioner/go.mod b/deploy/k8s-provisioner/go.mod new file mode 100644 index 0000000..5c8d398 --- /dev/null +++ b/deploy/k8s-provisioner/go.mod @@ -0,0 +1,161 @@ +module vanderlande.com/appstack/k8s-provisioner + +go 1.25.0 + +require ( + github.com/onsi/ginkgo/v2 v2.27.2 + github.com/onsi/gomega v1.38.2 + gopkg.in/yaml.v3 v3.0.1 + helm.sh/helm/v3 v3.19.4 + k8s.io/api v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/cli-runtime v0.35.0 + k8s.io/client-go v0.35.0 + sigs.k8s.io/controller-runtime v0.22.4 +) + +require ( + cel.dev/expr v0.24.0 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/BurntSushi/toml v1.5.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/containerd/containerd v1.7.29 // indirect + github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cyphar/filepath-securejoin v0.6.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.26.0 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/inconshreveable/mousetrap 
v1.1.0 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rubenv/sql-migrate v1.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools 
v0.38.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/grpc v1.72.1 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/apiextensions-apiserver v0.34.2 // indirect + k8s.io/apiserver v0.34.2 // indirect + k8s.io/component-base v0.34.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/kubectl v0.34.2 // indirect + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect + oras.land/oras-go/v2 v2.6.0 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/deploy/k8s-provisioner/go.sum b/deploy/k8s-provisioner/go.sum new file mode 100644 index 0000000..bba3359 --- /dev/null +++ b/deploy/k8s-provisioner/go.sum @@ -0,0 +1,501 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod 
h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= +github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= +github.com/gkampitakis/ciinfo v0.3.2/go.mod 
h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= +github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= +github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= +github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= +github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/gnostic-models v0.7.0 
h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= +github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/josharian/intern 
v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= +github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= +github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.9 
h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= +github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 
h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.4.0 
h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 
h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod 
h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= +go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= +go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= +go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +helm.sh/helm/v3 v3.19.4 h1:E2yFBejmZBczWr5LblhjZbvAOAwVumfBO1AtN3nqI30= +helm.sh/helm/v3 v3.19.4/go.mod h1:PC1rk7PqacpkV4acUFMLStOOis7QM9Jq3DveHBInu4s= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo= +k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE= +k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI= +k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE= +k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ= +k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM= +k8s.io/klog/v2 v2.130.1 
h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kubectl v0.34.2 h1:+fWGrVlDONMUmmQLDaGkQ9i91oszjjRAa94cr37hzqA= +k8s.io/kubectl v0.34.2/go.mod h1:X2KTOdtZZNrTWmUD4oHApJ836pevSl+zvC5sI6oO2YQ= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/deploy/k8s-provisioner/hack/boilerplate.go.txt b/deploy/k8s-provisioner/hack/boilerplate.go.txt new file mode 100644 index 0000000..9786798 --- /dev/null +++ b/deploy/k8s-provisioner/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
\ No newline at end of file
diff --git a/deploy/k8s-provisioner/internal/controller/cluster_controller.go b/deploy/k8s-provisioner/internal/controller/cluster_controller.go
new file mode 100644
index 0000000..b09ff9a
--- /dev/null
+++ b/deploy/k8s-provisioner/internal/controller/cluster_controller.go
@@ -0,0 +1,157 @@
+package controller
+
+import (
+	"context"
+	_ "embed"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	k8sprovisionerv1alpha1 "vanderlande.com/appstack/k8s-provisioner/api/v1alpha1"
+	"vanderlande.com/appstack/k8s-provisioner/internal/harvester"
+	"vanderlande.com/appstack/k8s-provisioner/internal/helm"
+	"vanderlande.com/appstack/k8s-provisioner/internal/templates"
+
+	"vanderlande.com/appstack/k8s-provisioner/internal/values"
+)
+
+type ClusterReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+}
+
+// HelmNodePool is an internal struct for mapping NodePools to Helm values.
+type HelmNodePool struct {
+	Name         string `json:"name"`
+	DisplayName  string `json:"displayName"`
+	Quantity     int    `json:"quantity"`
+	Etcd         bool   `json:"etcd"`
+	ControlPlane bool   `json:"controlplane"`
+	Worker       bool   `json:"worker"`
+	Paused       bool   `json:"paused"`
+	CpuCount     int    `json:"cpuCount"`
+	DiskSize     int    `json:"diskSize"`
+	ImageName    string `json:"imageName"`
+	MemorySize   int    `json:"memorySize"`
+	NetworkName  string `json:"networkName"`
+	SshUser      string `json:"sshUser"`
+	VmNamespace  string `json:"vmNamespace"`
+	UserData     string `json:"userData"`
+}
+
+// +kubebuilder:rbac:groups=k8sprovisioner.appstack.io,resources=clusters,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=k8sprovisioner.appstack.io,resources=clusters/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=k8sprovisioner.appstack.io,resources=infras,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
+
+const clusterFinalizer = "k8sprovisioner.appstack.io/finalizer"
+
+func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	l := log.FromContext(ctx)
+
+	// Initialize Managers
+	hvManager := harvester.NewIdentityManager(r.Client, r.Scheme)
+
+	// 1. Fetch Cluster
+	var cluster k8sprovisionerv1alpha1.Cluster
+	if err := r.Get(ctx, req.NamespacedName, &cluster); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	// 2. Handle Deletion
+	if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
+		if controllerutil.ContainsFinalizer(&cluster, clusterFinalizer) {
+			l.Info("Processing Cluster Deletion...")
+
+			// A. Uninstall Helm
+			helmCfg := helm.Config{Namespace: req.Namespace, ReleaseName: req.Name}
+			if err := helm.Uninstall(helmCfg); err != nil {
+				return ctrl.Result{}, err
+			}
+
+			// B. Cleanup Harvester (Using Manager)
+			hvManager.Cleanup(ctx, &cluster)
+
+			// C. Remove Finalizer
+			controllerutil.RemoveFinalizer(&cluster, clusterFinalizer)
+			if err := r.Update(ctx, &cluster); err != nil {
+				return ctrl.Result{}, err
+			}
+		}
+		return ctrl.Result{}, nil
+	}
+
+	// 3. Add Finalizer
+	if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizer) {
+		controllerutil.AddFinalizer(&cluster, clusterFinalizer)
+		if err := r.Update(ctx, &cluster); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	// 4. Fetch Infra
+	var infra k8sprovisionerv1alpha1.Infra
+	if err := r.Get(ctx, types.NamespacedName{Name: cluster.Spec.InfraRef, Namespace: req.Namespace}, &infra); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// =========================================================
+	// 5. SECURE HARVESTER IDENTITY (Simplified)
+	// =========================================================
+
+	// The manager handles looking up Rancher creds, minting tokens,
+	// saving secrets, and updating the Cluster status.
+	generatedSecretName, err := hvManager.Ensure(ctx, &cluster, &infra)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// =========================================================
+	// 6. HELM VALUES GENERATION
+	// =========================================================
+
+	vb := values.NewBuilder(
+		&cluster,
+		&infra,
+		templates.BaseValuesYAML,
+		generatedSecretName,
+		req.Namespace,
+	)
+
+	helmValues, err := vb.Build()
+	if err != nil {
+		l.Error(err, "Failed to generate helm values")
+		return ctrl.Result{}, err
+	}
+
+	chartSpec := vb.GetChartConfig()
+
+	// 7. Trigger Helm Apply
+	l.Info("Syncing Helm Release", "Release", req.Name)
+
+	helmCfg := helm.Config{
+		Namespace:   req.Namespace,
+		ReleaseName: req.Name,
+		RepoURL:     chartSpec.Repo,
+		ChartName:   chartSpec.Name,
+		Version:     chartSpec.Version,
+		Values:      helmValues,
+	}
+
+	if err := helm.Apply(helmCfg); err != nil {
+		l.Error(err, "Helm Apply Failed")
+		return ctrl.Result{}, err
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&k8sprovisionerv1alpha1.Cluster{}).
+		Complete(r)
+}
diff --git a/deploy/k8s-provisioner/internal/controller/cluster_controller_test.go b/deploy/k8s-provisioner/internal/controller/cluster_controller_test.go
new file mode 100644
index 0000000..590275f
--- /dev/null
+++ b/deploy/k8s-provisioner/internal/controller/cluster_controller_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	k8sprovisionerv1alpha1 "vanderlande.com/appstack/k8s-provisioner/api/v1alpha1"
+)
+
+var _ = Describe("Cluster Controller", func() {
+	Context("When reconciling a resource", func() {
+		const resourceName = "test-resource"
+
+		ctx := context.Background()
+
+		typeNamespacedName := types.NamespacedName{
+			Name:      resourceName,
+			Namespace: "default", // TODO(user): Modify as needed
+		}
+		cluster := &k8sprovisionerv1alpha1.Cluster{}
+
+		BeforeEach(func() {
+			By("creating the custom resource for the Kind Cluster")
+			err := k8sClient.Get(ctx, typeNamespacedName, cluster)
+			if err != nil && errors.IsNotFound(err) {
+				resource := &k8sprovisionerv1alpha1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      resourceName,
+						Namespace: "default",
+					},
+					// TODO(user): Specify other spec details if needed.
+				}
+				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+			}
+		})
+
+		AfterEach(func() {
+			// TODO(user): Cleanup logic after each test, like removing the resource instance.
+			resource := &k8sprovisionerv1alpha1.Cluster{}
+			err := k8sClient.Get(ctx, typeNamespacedName, resource)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Cleaning up the specific resource instance Cluster")
+			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
+		})
+		It("should successfully reconcile the resource", func() {
+			By("Reconciling the created resource")
+			controllerReconciler := &ClusterReconciler{
+				Client: k8sClient,
+				Scheme: k8sClient.Scheme(),
+			}
+
+			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+				NamespacedName: typeNamespacedName,
+			})
+			Expect(err).NotTo(HaveOccurred())
+			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
+			// Example: If you expect a certain status condition after reconciliation, verify it here.
+		})
+	})
+})
diff --git a/deploy/k8s-provisioner/internal/controller/suite_test.go b/deploy/k8s-provisioner/internal/controller/suite_test.go
new file mode 100644
index 0000000..c02ff88
--- /dev/null
+++ b/deploy/k8s-provisioner/internal/controller/suite_test.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	k8sprovisionerv1alpha1 "vanderlande.com/appstack/k8s-provisioner/api/v1alpha1"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+ +var ( + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client +) + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + var err error + err = k8sprovisionerv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + // Retrieve the first found binary directory to allow running tests from IDEs + if getFirstFoundEnvTestBinaryDir() != "" { + testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + } + + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. +// ENVTEST-based tests depend on specific binaries, usually located in paths set by +// controller-runtime. When running tests directly (e.g., via an IDE) without using +// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. +// +// This function streamlines the process by finding the required binaries, similar to +// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are +// properly set up, run 'make setup-envtest' beforehand. +func getFirstFoundEnvTestBinaryDir() string { + basePath := filepath.Join("..", "..", "bin", "k8s") + entries, err := os.ReadDir(basePath) + if err != nil { + logf.Log.Error(err, "Failed to read directory", "path", basePath) + return "" + } + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(basePath, entry.Name()) + } + } + return "" +} diff --git a/deploy/k8s-provisioner/internal/harvester/factory.go b/deploy/k8s-provisioner/internal/harvester/factory.go new file mode 100644 index 0000000..c50cf80 --- /dev/null +++ b/deploy/k8s-provisioner/internal/harvester/factory.go @@ -0,0 +1,229 @@ +package harvester + +import ( + "context" + "encoding/base64" + "fmt" + "time" + + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + k8sprovisionerv1alpha1 "vanderlande.com/appstack/k8s-provisioner/api/v1alpha1" +) + +// TryCleanup performs a "Best Effort" cleanup of Harvester resources. +func TryCleanup(ctx context.Context, k8sClient client.Client, infraRefName, namespace, saName string) { + l := log.FromContext(ctx) + + // 1. 
Fetch Infra + var infra k8sprovisionerv1alpha1.Infra + if err := k8sClient.Get(ctx, types.NamespacedName{Name: infraRefName, Namespace: namespace}, &infra); err != nil { + l.Info("Cleanup skipped: Infra object not found") + return + } + + vmNamespace := infra.Spec.VmNamespace + if vmNamespace == "" { + vmNamespace = "default" + } + + // 2. Fetch Master Credential + rancherCredName := infra.Spec.CloudCredentialSecret + var rancherSecret corev1.Secret + if err := k8sClient.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil { + l.Info("Cleanup skipped: Master Credential Secret not found") + return + } + + // 3. Extract Kubeconfig + var kubeBytes []byte + if len(rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]) > 0 { + kubeBytes = rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"] + } else if len(rancherSecret.Data["credential"]) > 0 { + kubeBytes = rancherSecret.Data["credential"] + } else { + return + } + + // 4. Cleanup + if err := deleteHarvesterResources(ctx, kubeBytes, saName, vmNamespace); err != nil { + l.Error(err, "Failed to cleanup Harvester resources (ignoring)") + } else { + l.Info("Harvester resources deleted successfully") + } +} + +// Internal helper for cleanup +func deleteHarvesterResources(ctx context.Context, masterKubeconfig []byte, serviceAccountName, vmNamespace string) error { + restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig) + if err != nil { + return err + } + hvClient, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return err + } + + deletePolicy := metav1.DeletePropagationBackground + deleteOpts := metav1.DeleteOptions{PropagationPolicy: &deletePolicy} + + // 1. Delete Global CSI Binding (ClusterRoleBinding) + csiBindingName := fmt.Sprintf("%s-csi-binding", serviceAccountName) + err = hvClient.RbacV1().ClusterRoleBindings().Delete(ctx, csiBindingName, deleteOpts) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + // 2. Delete Cloud Provider Binding (RoleBinding in VM Namespace) + cpBindingName := fmt.Sprintf("%s-cloud-binding", serviceAccountName) + err = hvClient.RbacV1().RoleBindings(vmNamespace).Delete(ctx, cpBindingName, deleteOpts) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + // 3. Delete ServiceAccount (VM Namespace) + err = hvClient.CoreV1().ServiceAccounts(vmNamespace).Delete(ctx, serviceAccountName, deleteOpts) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + return nil +} + +// EnsureCredential mints a dedicated ServiceAccount in the specific VM Namespace +func EnsureCredential(ctx context.Context, masterKubeconfig []byte, clusterName, targetNamespace, vmNamespace, harvesterURL string) (*corev1.Secret, string, time.Time, error) { + + // --- PHASE 1: Connect (Proxy/Master Config) --- + restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig) + if err != nil { + return nil, "", time.Time{}, fmt.Errorf("invalid rancher cloud credential kubeconfig: %w", err) + } + hvClient, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, "", time.Time{}, err + } + + // --- PHASE 2: Create Identity (SA & Bindings) --- + if vmNamespace == "" { + vmNamespace = "default" + } + saName := fmt.Sprintf("prov-%s", clusterName) + + // A. 
Create ServiceAccount
+	sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: vmNamespace}}
+	if _, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil {
+		if !apierrors.IsAlreadyExists(err) {
+			return nil, "", time.Time{}, err
+		}
+	}
+
+	// B. Create RoleBinding (VM Namespace); AlreadyExists is fine, anything else is fatal
+	rb := &rbacv1.RoleBinding{
+		ObjectMeta: metav1.ObjectMeta{Name: saName + "-cloud-binding", Namespace: vmNamespace},
+		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}},
+		RoleRef:    rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:cloudprovider", APIGroup: "rbac.authorization.k8s.io"},
+	}
+	if _, err := hvClient.RbacV1().RoleBindings(vmNamespace).Create(ctx, rb, metav1.CreateOptions{}); err != nil {
+		if !apierrors.IsAlreadyExists(err) {
+			return nil, "", time.Time{}, err
+		}
+	}
+
+	// C. Create ClusterRoleBinding (Global); same error policy as above
+	crb := &rbacv1.ClusterRoleBinding{
+		ObjectMeta: metav1.ObjectMeta{Name: saName + "-csi-binding"},
+		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}},
+		RoleRef:    rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:csi-driver", APIGroup: "rbac.authorization.k8s.io"},
+	}
+	if _, err := hvClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}); err != nil {
+		if !apierrors.IsAlreadyExists(err) {
+			return nil, "", time.Time{}, err
+		}
+	}
+
+	// D. Mint Token (315360000 seconds is roughly ten years)
+	ttlSeconds := int64(315360000)
+	tokenRequest, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).CreateToken(ctx, saName, &authenticationv1.TokenRequest{
+		Spec: authenticationv1.TokenRequestSpec{ExpirationSeconds: &ttlSeconds},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		return nil, "", time.Time{}, fmt.Errorf("failed to mint harvester token: %w", err)
+	}
+	expiryTime := time.Now().Add(time.Duration(ttlSeconds) * time.Second)
+
+	// --- PHASE 3: Determine URL & CA ---
+
+	// 1. URL: Use the explicitly provided HarvesterURL
+	if harvesterURL == "" {
+		// Fallback to the proxy host if the user forgot to set it (safety net)
+		harvesterURL = restConfig.Host
+	}
+
+	// 2. 
CA: Fetch the internal Harvester CA + // (Required because the proxy CA won't match the direct IP/URL) + harvesterCA := restConfig.CAData + + caConfigMap, err := hvClient.CoreV1().ConfigMaps("default").Get(ctx, "kube-root-ca.crt", metav1.GetOptions{}) + if err == nil { + if caStr, ok := caConfigMap.Data["ca.crt"]; ok { + harvesterCA = []byte(caStr) + } + } + + // --- PHASE 4: Construct Kubeconfig --- + caData := base64.StdEncoding.EncodeToString(harvesterCA) + token := tokenRequest.Status.Token + + // Ensure "namespace" aligns vertically with "cluster" and "user" + newKubeconfig := fmt.Sprintf( + `apiVersion: v1 +kind: Config +clusters: +- name: harvester + cluster: + server: %s + certificate-authority-data: %s +users: +- name: provisioner + user: + token: %s +contexts: +- name: default + context: + cluster: harvester + user: provisioner + namespace: %s +current-context: default +`, harvesterURL, caData, token, vmNamespace) + + // --- PHASE 5: Create Secret --- + secretName := fmt.Sprintf("harvesterconfig-%s", clusterName) + + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: targetNamespace, + Annotations: map[string]string{ + "v2prov-secret-authorized-for-cluster": clusterName, + "v2prov-authorized-secret-deletes-on-cluster-removal": "true", + }, + Labels: map[string]string{ + "cattle.io/creator": "k8s-provisioner", + }, + }, + Type: "Opaque", + StringData: map[string]string{ + "credential": newKubeconfig, + }, + } + + return secret, saName, expiryTime, nil +} diff --git a/deploy/k8s-provisioner/internal/helm/client.go b/deploy/k8s-provisioner/internal/helm/client.go new file mode 100644 index 0000000..d2df63e --- /dev/null +++ b/deploy/k8s-provisioner/internal/helm/client.go @@ -0,0 +1,126 @@ +package helm + +import ( + "fmt" + "log" + "os" + + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/registry" // [NEW] Required for OCI + "helm.sh/helm/v3/pkg/storage/driver" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +type Config struct { + Namespace string + ReleaseName string + RepoURL string + ChartName string + Version string + Values map[string]interface{} +} + +func Apply(cfg Config) error { + settings := cli.New() + + // 1. Initialize Action Config + actionConfig := new(action.Configuration) + getter := genericclioptions.NewConfigFlags(false) + + if err := actionConfig.Init(getter, cfg.Namespace, os.Getenv("HELM_DRIVER"), log.Printf); err != nil { + return fmt.Errorf("failed to init helm config: %w", err) + } + + // 2. [NEW] Initialize OCI Registry Client + // This tells Helm how to talk to ghcr.io, docker.io, etc. + registryClient, err := registry.NewClient( + registry.ClientOptDebug(true), + registry.ClientOptEnableCache(true), + registry.ClientOptCredentialsFile(settings.RegistryConfig), // Uses ~/.config/helm/registry/config.json + ) + if err != nil { + return fmt.Errorf("failed to init registry client: %w", err) + } + actionConfig.RegistryClient = registryClient + + // 3. Setup Install Action + client := action.NewInstall(actionConfig) + client.Version = cfg.Version + client.Namespace = cfg.Namespace + client.ReleaseName = cfg.ReleaseName + client.CreateNamespace = true + + if cfg.RepoURL != "" { + client.RepoURL = cfg.RepoURL + } + + // 4. 
Locate Chart (Now supports oci:// because RegistryClient is set) + cp, err := client.ChartPathOptions.LocateChart(cfg.ChartName, settings) + if err != nil { + return fmt.Errorf("failed to locate chart %s: %w", cfg.ChartName, err) + } + + chart, err := loader.Load(cp) + if err != nil { + return fmt.Errorf("failed to load chart: %w", err) + } + + // 5. Install or Upgrade + histClient := action.NewHistory(actionConfig) + histClient.Max = 1 + + if _, err := histClient.Run(cfg.ReleaseName); err == driver.ErrReleaseNotFound { + fmt.Printf("Installing OCI Release %s...\n", cfg.ReleaseName) + _, err := client.Run(chart, cfg.Values) + return err + } else if err != nil { + return err + } + + fmt.Printf("Upgrading OCI Release %s...\n", cfg.ReleaseName) + upgrade := action.NewUpgrade(actionConfig) + upgrade.Version = cfg.Version + upgrade.Namespace = cfg.Namespace + // Important: Upgrade also needs the RegistryClient, but it shares 'actionConfig' + // so it is already set up. + if cfg.RepoURL != "" { + upgrade.RepoURL = cfg.RepoURL + } + _, err = upgrade.Run(cfg.ReleaseName, chart, cfg.Values) + return err +} + +func Uninstall(cfg Config) error { + settings := cli.New() + + // 1. Initialize Action Config (Same as Apply) + actionConfig := new(action.Configuration) + getter := genericclioptions.NewConfigFlags(false) + if err := actionConfig.Init(getter, cfg.Namespace, os.Getenv("HELM_DRIVER"), log.Printf); err != nil { + return fmt.Errorf("failed to init helm config: %w", err) + } + + // 2. Initialize OCI Registry Client (Crucial for OCI charts) + registryClient, err := registry.NewClient( + registry.ClientOptDebug(true), + registry.ClientOptEnableCache(true), + registry.ClientOptCredentialsFile(settings.RegistryConfig), + ) + if err != nil { + return fmt.Errorf("failed to init registry client: %w", err) + } + actionConfig.RegistryClient = registryClient + + // 3. Run Uninstall + client := action.NewUninstall(actionConfig) + // Don't fail if it's already gone + _, err = client.Run(cfg.ReleaseName) + if err != nil && err != driver.ErrReleaseNotFound { + return fmt.Errorf("failed to uninstall release: %w", err) + } + + fmt.Printf("✅ Uninstalled Release %s\n", cfg.ReleaseName) + return nil +} diff --git a/deploy/k8s-provisioner/internal/templates/base_values.yaml b/deploy/k8s-provisioner/internal/templates/base_values.yaml new file mode 100644 index 0000000..cabfb23 --- /dev/null +++ b/deploy/k8s-provisioner/internal/templates/base_values.yaml @@ -0,0 +1,456 @@ +# ---------------------------------------------------------------- +# BASE TEMPLATE (internal/templates/base_values.yaml) +# ---------------------------------------------------------------- + +_defaults: + helmChart: + repo: "" + name: "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates" + version: "0.7.2" + controlPlaneProfile: + cpuCores: 4 + memoryGb: 8 + diskGb: 40 + userData: &userData | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - qemu-guest-agent + - yq + - jq + - curl + - wget + + bootcmd: + - sysctl -w net.ipv6.conf.all.disable_ipv6=1 + - sysctl -w net.ipv6.conf.default.disable_ipv6=1 + + write_files: + # ---------------------------------------------------------------- + # 1. 
CNI Permission Fix Script & Cron (CIS 1.1.9 Persistence) + # ---------------------------------------------------------------- + - path: /usr/local/bin/fix-cni-perms.sh + permissions: '0700' + owner: root:root + content: | + #!/bin/bash + # Wait 60s on boot for RKE2 to write files + [ "$1" == "boot" ] && sleep 60 + + # Enforce 600 on CNI files (CIS 1.1.9) + if [ -d /etc/cni/net.d ]; then + find /etc/cni/net.d -type f -exec chmod 600 {} \; + fi + if [ -d /var/lib/cni/networks ]; then + find /var/lib/cni/networks -type f -exec chmod 600 {} \; + fi + + # Every RKE2 service restart can reset CNI file permissions, so we run + # this script on reboot and daily via cron to maintain CIS compliance. + + - path: /etc/cron.d/cis-cni-fix + permissions: '0644' + owner: root:root + content: | + # Run on Reboot (with delay) to fix files created during startup + @reboot root /usr/local/bin/fix-cni-perms.sh boot + # Run once daily at 00:00 to correct any drift + 0 0 * * * root /usr/local/bin/fix-cni-perms.sh + + # ---------------------------------------------------------------- + # 2. RKE2 Admission Config + # ---------------------------------------------------------------- + - path: /etc/rancher/rke2/rke2-admission.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1beta1 + kind: PodSecurityConfiguration + defaults: + enforce: "restricted" + enforce-version: "latest" + audit: "restricted" + audit-version: "latest" + warn: "restricted" + warn-version: "latest" + exemptions: + usernames: [] + runtimeClasses: [] + namespaces: [compliance-operator-system,kube-system, cis-operator-system, tigera-operator, calico-system, rke2-ingress-nginx, cattle-system, cattle-fleet-system, longhorn-system, cattle-neuvector-system] + - name: EventRateLimit + configuration: + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - type: Server + qps: 5000 + burst: 20000 + + # ---------------------------------------------------------------- + # 3. RKE2 Audit Policy + # ---------------------------------------------------------------- + - path: /etc/rancher/rke2/audit-policy.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: None + users: ["system:kube-controller-manager", "system:kube-scheduler", "system:serviceaccount:kube-system:endpoint-controller"] + verbs: ["get", "update"] + resources: + - group: "" + resources: ["endpoints", "services", "services/status"] + - level: None + verbs: ["get"] + resources: + - group: "" + resources: ["nodes", "nodes/status", "pods", "pods/status"] + - level: None + users: ["kube-proxy"] + verbs: ["watch"] + resources: + - group: "" + resources: ["endpoints", "services", "services/status", "configmaps"] + - level: Metadata + resources: + - group: "" + resources: ["secrets", "configmaps"] + - level: RequestResponse + omitStages: + - RequestReceived + + # ---------------------------------------------------------------- + # 4. 
Static NetworkPolicies + # ---------------------------------------------------------------- + - path: /var/lib/rancher/rke2/server/manifests/cis-network-policy.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-deny-ingress + namespace: default + spec: + podSelector: {} + policyTypes: + - Ingress + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-all-metrics + namespace: kube-public + spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-all-system + namespace: kube-system + spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress + + # ---------------------------------------------------------------- + # 5. Service Account Hardening + # ---------------------------------------------------------------- + - path: /var/lib/rancher/rke2/server/manifests/cis-sa-config.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + namespace: default + automountServiceAccountToken: false + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + namespace: kube-system + automountServiceAccountToken: false + + - path: /var/lib/rancher/rke2/server/manifests/cis-sa-cron.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: {name: sa-cleaner, namespace: kube-system} + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: {name: sa-cleaner-role} + rules: + - apiGroups: [""] + resources: ["namespaces", "serviceaccounts"] + verbs: ["get", "list", "patch"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: {name: sa-cleaner-binding} + subjects: [{kind: ServiceAccount, name: sa-cleaner, namespace: kube-system}] + roleRef: {kind: ClusterRole, name: sa-cleaner-role, apiGroup: rbac.authorization.k8s.io} + --- + apiVersion: batch/v1 + kind: CronJob + metadata: + name: sa-cleaner + namespace: kube-system + spec: + schedule: "0 */6 * * *" # Run every 6 hours + jobTemplate: + spec: + template: + spec: + serviceAccountName: sa-cleaner + containers: + - name: cleaner + image: rancher/kubectl:v1.26.0 + command: + - /bin/bash + - -c + - | + # Get all namespaces + for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do + # Check if default SA has automount=true (or null) + automount=$(kubectl get sa default -n $ns -o jsonpath='{.automountServiceAccountToken}') + if [ "$automount" != "false" ]; then + echo "Securing default SA in namespace: $ns" + kubectl patch sa default -n $ns -p '{"automountServiceAccountToken": false}' + fi + done + restartPolicy: OnFailure + + # ---------------------------------------------------------------- + # 6. 
OS Sysctls Hardening + # ---------------------------------------------------------------- + - path: /etc/sysctl.d/60-rke2-cis.conf + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.max_map_count=65530 + vm.panic_on_oom=0 + fs.inotify.max_user_watches=1048576 + fs.inotify.max_user_instances=8192 + kernel.panic=10 + kernel.panic_on_oops=1 + net.ipv4.conf.all.rp_filter=1 + net.ipv4.conf.default.rp_filter=1 + net.ipv4.conf.all.accept_source_route=0 + net.ipv4.conf.default.accept_source_route=0 + net.ipv4.conf.all.accept_redirects=0 + net.ipv4.conf.default.accept_redirects=0 + net.ipv4.conf.all.send_redirects=0 + net.ipv4.conf.default.send_redirects=0 + net.ipv4.conf.all.log_martians=1 + net.ipv4.conf.default.log_martians=1 + net.ipv4.icmp_echo_ignore_broadcasts=1 + net.ipv4.icmp_ignore_bogus_error_responses=1 + net.ipv6.conf.all.disable_ipv6=1 + net.ipv6.conf.default.disable_ipv6=1 + fs.protected_hardlinks=1 + fs.protected_symlinks=1 + + # ---------------------------------------------------------------- + # 7. Environment & Setup Scripts + # ---------------------------------------------------------------- + - path: /etc/profile.d/rke2.sh + permissions: '0644' + content: | + export PATH=$PATH:/var/lib/rancher/rke2/bin:/opt/rke2/bin + export KUBECONFIG=/etc/rancher/rke2/rke2.yaml + + + - path: /root/updates.sh + permissions: '0550' + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y + + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3. + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + + - name: etcd + gecos: "etcd user" + shell: /sbin/nologin + system: true + lock_passwd: true + + disable_root: true + ssh_pwauth: true + + runcmd: + - systemctl enable --now qemu-guest-agent + - sysctl --system + - /root/updates.sh + # Immediate run of fix script + - /usr/local/bin/fix-cni-perms.sh + + final_message: | + VI_CNV_CLOUD_INIT has been applied successfully. + Node ready for Rancher! 
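Everything under `_defaults` above is template-side metadata (chart coordinates, the control-plane sizing profile, and the `userData` anchor reused by the nodepools), while the keys below are real chart values. The `internal/values` package that consumes this embedded file is not included in this diff, so the following is a minimal, assumption-laden sketch of what its `Build()` could do; only `templates.BaseValuesYAML`, the minted secret name, and the `secret://<namespace>:<name>` reference format are taken from the diff itself.

```go
// Hedged sketch of internal/values (not shown in this diff). Builder mirrors
// the controller call: values.NewBuilder(&cluster, &infra,
// templates.BaseValuesYAML, generatedSecretName, req.Namespace).
package values

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type Builder struct {
	baseYAML   []byte // templates.BaseValuesYAML
	secretName string // minted by harvester.EnsureCredential
	namespace  string // target namespace, e.g. fleet-default
	pools      []any  // regenerated nodepools; see the sketch before the nodepools section
}

func (b *Builder) Build() (map[string]interface{}, error) {
	vals := map[string]interface{}{}
	if err := yaml.Unmarshal(b.baseYAML, &vals); err != nil {
		return nil, err
	}

	// "_defaults" is template-only metadata and must not reach the chart.
	delete(vals, "_defaults")

	// Point the cloud provider at the per-cluster kubeconfig secret,
	// matching the "secret://fleet-default:harvesterconfig..." form below.
	// Type assertions are kept terse because the template is embedded and trusted.
	cluster := vals["cluster"].(map[string]interface{})
	config := cluster["config"].(map[string]interface{})
	global := config["globalConfig"].(map[string]interface{})
	global["cloud_provider_config"] = fmt.Sprintf("secret://%s:%s", b.namespace, b.secretName)

	// Discard the "MANUAL TESTING SECTION" pools and substitute generated ones.
	vals["nodepools"] = b.pools

	return vals, nil
}
```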
+ +# amazonec2, azure, digitalocean, harvester, vsphere, custom +cloudprovider: harvester + +# cloud provider credentials +cloudCredentialSecretName: cc-mrklm + +# rancher manager url +rancher: + cattle: + url: rancher-mgmt.product.lan + +# cluster values +cluster: + + name: default-cluster + # labels: + # key: value + config: + kubernetesVersion: v1.33.5+rke2r1 + enableNetworkPolicy: true + localClusterAuthEndpoint: + enabled: false + chartValues: + harvester-cloud-provider: + global: + cattle: + clusterName: default-cluster + + # Pod Security Standard (Replaces PSP) + defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted" + + globalConfig: + systemDefaultRegistry: docker.io + cni: canal + docker: false + disable_scheduler: false + disable_cloud_controller: false + disable_kube_proxy: false + etcd_expose_metrics: false + profile: 'cis' + selinux: false + secrets_encryption: true + write_kubeconfig_mode: 0600 + use_service_account_credentials: false + protect_kernel_defaults: true + cloud_provider_name: harvester + cloud_provider_config: secret://fleet-default:harvesterconfigzswmd + + kube_apiserver_arg: + - "service-account-extend-token-expiration=false" + - "anonymous-auth=false" + - "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs" + - "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml" + - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml" + - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log" + - "audit-log-maxage=30" + - "audit-log-maxbackup=10" + - "audit-log-maxsize=100" + + kubelet_arg: + # Strong Ciphers (CIS 4.2.12) + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + # PID Limit (CIS 4.2.13) + - "pod-max-pids=4096" + # Seccomp Default (CIS 4.2.14) + - "seccomp-default=true" + - "protect-kernel-defaults=true" + - "make-iptables-util-chains=true" + + upgradeStrategy: + controlPlaneConcurrency: 10% + controlPlaneDrainOptions: + enabled: false + workerConcurrency: 10% + workerDrainOptions: + enabled: false +addons: + monitoring: + enabled: false + logging: + enabled: false + longhorn: + enabled: false + neuvector: + enabled: false + +# node and nodepool(s) values +# ---------------------------------------------------------------- +# MANUAL TESTING SECTION +# The Operator will DELETE and OVERWRITE this section at runtime. +# These values are only used if you run 'helm install' manually. 
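Because the operator deletes and overwrites the `nodepools` section at runtime, as the comment above notes, a sketch of that regeneration may help. None of the code below exists in this diff: the spec field names (`ControlPlaneHA`, `WorkerPools`, `CpuCores`, and so on) are assumptions inferred from the YAML in `test/local/`, and `HelmNodePool` refers to the struct declared in cluster_controller.go earlier in this diff.

```go
// Sketch only: one possible regeneration of the nodepools below from a
// Cluster and its Infra.
func nodePoolsFor(cluster *k8sprovisionerv1alpha1.Cluster, infra *k8sprovisionerv1alpha1.Infra, userData string) []HelmNodePool {
	cpQuantity := 1
	if cluster.Spec.ControlPlaneHA {
		cpQuantity = 3 // "true = 3 Nodes" per the comments in test/local/cluster.yaml
	}

	// Control-plane sizing follows _defaults.controlPlaneProfile (4 CPU / 8 GB / 40 GB).
	pools := []HelmNodePool{{
		Name:         "control-plane-nodes",
		DisplayName:  "cp-nodes",
		Quantity:     cpQuantity,
		Etcd:         true,
		ControlPlane: true,
		CpuCount:     4,
		MemorySize:   8,
		DiskSize:     40,
		ImageName:    infra.Spec.ImageName,
		NetworkName:  infra.Spec.NetworkName,
		SshUser:      infra.Spec.SshUser,
		VmNamespace:  infra.Spec.VmNamespace,
		UserData:     userData,
	}}

	// Worker pools bring their own hardware specs but reuse the Infra VM settings.
	for _, wp := range cluster.Spec.WorkerPools {
		pools = append(pools, HelmNodePool{
			Name:        wp.Name,
			DisplayName: wp.Name,
			Quantity:    wp.Quantity,
			Worker:      true,
			CpuCount:    wp.CpuCores,
			MemorySize:  wp.MemoryGb,
			DiskSize:    wp.DiskGb,
			ImageName:   infra.Spec.ImageName,
			NetworkName: infra.Spec.NetworkName,
			SshUser:     infra.Spec.SshUser,
			VmNamespace: infra.Spec.VmNamespace,
			UserData:    userData,
		})
	}
	return pools
}
```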
+# ---------------------------------------------------------------- +nodepools: + - name: control-plane-nodes + displayName: cp-nodes + quantity: 1 + etcd: true + controlplane: true + worker: false + paused: false + cpuCount: 4 + diskSize: 40 + imageName: vanderlande/image-qhtpc + memorySize: 8 + networkName: vanderlande/vm-lan + sshUser: rancher + vmNamespace: vanderlande + userData: *userData + + - name: worker-nodes + displayName: wk-nodes + quantity: 2 + etcd: false + controlplane: false + worker: true + paused: false + cpuCount: 2 + diskSize: 40 + imageName: vanderlande/image-qmx5q + memorySize: 8 + networkName: vanderlande/vm-lan + sshUser: rancher + vmNamespace: vanderlande + userData: *userData + diff --git a/deploy/k8s-provisioner/internal/templates/base_values_vsphere.yaml b/deploy/k8s-provisioner/internal/templates/base_values_vsphere.yaml new file mode 100644 index 0000000..18f5a37 --- /dev/null +++ b/deploy/k8s-provisioner/internal/templates/base_values_vsphere.yaml @@ -0,0 +1,205 @@ +# ---------------------------------------------------------------- +# BASE TEMPLATE (internal/templates/base_values.yaml) +# ---------------------------------------------------------------- + +_defaults: + helmChart: + repo: "" + name: "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates" + version: "0.7.2" + controlPlaneProfile: + cpuCores: 4 + memoryGb: 8 + diskGb: 40 + userData: &userData | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - yq + - jq + + disable_root: true + ssh_pwauth: false + + write_files: + - path: /root/updates.sh + permissions: '0550' + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y + + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3. + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + + disable_root: true + ssh_pwauth: true + + runcmd: + # - systemctl enable --now qemu-guest-agent + - sysctl --system + - /root/updates.sh + # Immediate run of fix script + + bootcmd: + - sudo bash /root/networking.sh + + final_message: | + VI_CNV_CLOUD_INIT has been applied successfully. + Node ready for Rancher! 
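This vSphere counterpart (base_values_vsphere.yaml) parallels the Harvester template, but embed.go, added later in this diff, embeds only base_values.yaml, so the vSphere variant is not yet reachable from the operator. If it were wired in, a provider switch keyed on the `cloudprovider` value could look like the following sketch; the second embed directive and `ForProvider` are hypothetical.

```go
// Sketch: only base_values.yaml is embedded by embed.go in this diff.
// The second directive and ForProvider below are hypothetical additions.
package templates

import _ "embed"

//go:embed base_values.yaml
var BaseValuesYAML []byte

//go:embed base_values_vsphere.yaml
var baseValuesVsphereYAML []byte

// ForProvider selects the base template matching the top-level
// "cloudprovider" key of the values files.
func ForProvider(provider string) []byte {
	if provider == "vsphere" {
		return baseValuesVsphereYAML
	}
	return BaseValuesYAML // harvester is the default template
}
```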
+ +# amazonec2, azure, digitalocean, harvester, vsphere, custom +cloudprovider: vsphere + +# cloud provider credentials +cloudCredentialSecretName: cc-lhtl9 + +# rancher manager url +rancher: + cattle: + url: rancher.tst.vanderlande.com + +# cluster values +cluster: + + name: default-cluster-005 + # labels: + # key: value + config: + kubernetesVersion: v1.31.12+rke2r1 + enableNetworkPolicy: true + localClusterAuthEndpoint: + enabled: false + + + # Pod Security Standard (Replaces PSP) + # defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted" + + globalConfig: + systemDefaultRegistry: docker.io + cni: canal + docker: false + disable_scheduler: false + disable_cloud_controller: false + disable_kube_proxy: false + etcd_expose_metrics: false + profile: '' + selinux: false + secrets_encryption: false + write_kubeconfig_mode: 0600 + use_service_account_credentials: false + protect_kernel_defaults: false + cloud_provider_name: '' + + # kube_apiserver_arg: + # - "service-account-extend-token-expiration=false" + # - "anonymous-auth=false" + # - "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs" + # - "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml" + # - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml" + # - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log" + # - "audit-log-maxage=30" + # - "audit-log-maxbackup=10" + # - "audit-log-maxsize=100" + + # kubelet_arg: + # # Strong Ciphers (CIS 4.2.12) + # - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + # # PID Limit (CIS 4.2.13) + # - "pod-max-pids=4096" + # # Seccomp Default (CIS 4.2.14) + # - "seccomp-default=true" + # - "protect-kernel-defaults=true" + # - "make-iptables-util-chains=true" + + upgradeStrategy: + controlPlaneConcurrency: 10% + controlPlaneDrainOptions: + enabled: false + workerConcurrency: 10% + workerDrainOptions: + enabled: false +addons: + monitoring: + enabled: false + logging: + enabled: false + longhorn: + enabled: true + neuvector: + enabled: false + +# node and nodepool(s) values +# ---------------------------------------------------------------- +# MANUAL TESTING SECTION +# The Operator will DELETE and OVERWRITE this section at runtime. +# These values are only used if you run 'helm install' manually. 
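The pools in the section that follows carry vSphere provisioning fields (`vcenter`, `datastoreCluster`, `creationType`, `cloneFrom`) that the Harvester-shaped `HelmNodePool` struct in cluster_controller.go cannot represent, and the sizing units shift from GB to MB (`memorySize: 8192`, `diskSize: 40000`). A struct covering them might look like the sketch below; no such type exists in this diff, and the field names simply mirror the YAML keys.

```go
// Sketch only: a hypothetical vSphere pool type. Note the MB units, unlike
// the Harvester pools where memorySize/diskSize are given in GB.
type VsphereNodePool struct {
	Name             string   `json:"name"`
	DisplayName      string   `json:"displayName"`
	Quantity         int      `json:"quantity"`
	Etcd             bool     `json:"etcd"`
	ControlPlane     bool     `json:"controlplane"`
	Worker           bool     `json:"worker"`
	Paused           bool     `json:"paused"`
	CpuCount         int      `json:"cpuCount"`
	MemorySize       int      `json:"memorySize"` // MB, e.g. 8192
	DiskSize         int      `json:"diskSize"`   // MB, e.g. 40000
	Vcenter          string   `json:"vcenter"`
	Datacenter       string   `json:"datacenter"`
	Folder           string   `json:"folder"`
	Pool             string   `json:"pool"`
	DatastoreCluster string   `json:"datastoreCluster"`
	Network          []string `json:"network"`
	CreationType     string   `json:"creationType"`
	CloneFrom        string   `json:"cloneFrom"`
	CloudConfig      string   `json:"cloudConfig"`
}
```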
+# ---------------------------------------------------------------- +nodepools: + - name: control-plane-nodes + displayName: cp-nodes + quantity: 1 + etcd: true + controlplane: true + worker: false + paused: false + # VSPHERE SPECIFIC FIELDS + cpuCount: 2 + memorySize: 8192 + diskSize: 40000 + vcenter: "vcenter.vanderlande.com" + datacenter: "NL001" + folder: "ICT Digitalisation - Rancher" + pool: "NL001 Development - Rancher/Resources" + datastoreCluster: "NL001 Development - Rancher SDRS" # Matches your SDRS input + network: + - "nl001.vDS.Distri.Vlan.1542" + # Provisioning Source + creationType: "template" + cloneFrom: "nl001-cp-ubuntu-22.04-amd64-20250327-5.15.0-135-rke2-k3s" + cloudConfig: *userData # Using the anchor from your base file + + - name: worker-storage-nodes + displayName: wk-nodes + quantity: 2 + etcd: false + controlplane: false + worker: true + paused: false + # VSPHERE SPECIFIC FIELDS + cpuCount: 4 + memorySize: 8192 + diskSize: 100000 + vcenter: "vcenter.vanderlande.com" + datacenter: "NL001" + folder: "ICT Digitalisation - Rancher" + pool: "NL001 Development - Rancher/Resources" + datastoreCluster: "NL001 Development - Rancher SDRS" # Matches your SDRS input + network: + - "nl001.vDS.Distri.Vlan.1542" + # Provisioning Source + creationType: "template" + cloneFrom: "nl001-cp-ubuntu-22.04-amd64-20250327-5.15.0-135-rke2-k3s" + cloudConfig: *userData # Using the anchor from your base file + + + diff --git a/deploy/k8s-provisioner/internal/templates/embed.go b/deploy/k8s-provisioner/internal/templates/embed.go new file mode 100644 index 0000000..745896b --- /dev/null +++ b/deploy/k8s-provisioner/internal/templates/embed.go @@ -0,0 +1,6 @@ +package templates + +import _ "embed" + +//go:embed base_values.yaml +var BaseValuesYAML []byte diff --git a/deploy/k8s-provisioner/test/e2e/e2e_suite_test.go b/deploy/k8s-provisioner/test/e2e/e2e_suite_test.go new file mode 100644 index 0000000..aded9ab --- /dev/null +++ b/deploy/k8s-provisioner/test/e2e/e2e_suite_test.go @@ -0,0 +1,92 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os" + "os/exec" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "vanderlande.com/appstack/k8s-provisioner/test/utils" +) + +var ( + // Optional Environment Variables: + // - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup. + // These variables are useful if CertManager is already installed, avoiding + // re-installation and conflicts. + skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true" + // isCertManagerAlreadyInstalled will be set true when CertManager CRDs be found on the cluster + isCertManagerAlreadyInstalled = false + + // projectImage is the name of the image which will be build and loaded + // with the code source changes to be tested. + projectImage = "example.com/k8s-provisioner:v0.0.1" +) + +// TestE2E runs the end-to-end (e2e) test suite for the project. 
These tests execute in an isolated, +// temporary environment to validate project changes with the purpose of being used in CI jobs. +// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs +// CertManager. +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + _, _ = fmt.Fprintf(GinkgoWriter, "Starting k8s-provisioner integration test suite\n") + RunSpecs(t, "e2e suite") +} + +var _ = BeforeSuite(func() { + By("building the manager(Operator) image") + cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage)) + _, err := utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image") + + // TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is + // built and available before running the tests. Also, remove the following block. + By("loading the manager(Operator) image on Kind") + err = utils.LoadImageToKindClusterWithName(projectImage) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind") + + // The tests-e2e are intended to run on a temporary cluster that is created and destroyed for testing. + // To prevent errors when tests run in environments with CertManager already installed, + // we check for its presence before execution. + // Setup CertManager before the suite if not skipped and if not already installed + if !skipCertManagerInstall { + By("checking if cert manager is installed already") + isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled() + if !isCertManagerAlreadyInstalled { + _, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n") + Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager") + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n") + } + } +}) + +var _ = AfterSuite(func() { + // Teardown CertManager after the suite if not skipped and if it was not already installed + if !skipCertManagerInstall && !isCertManagerAlreadyInstalled { + _, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n") + utils.UninstallCertManager() + } +}) diff --git a/deploy/k8s-provisioner/test/e2e/e2e_test.go b/deploy/k8s-provisioner/test/e2e/e2e_test.go new file mode 100644 index 0000000..b2d9e17 --- /dev/null +++ b/deploy/k8s-provisioner/test/e2e/e2e_test.go @@ -0,0 +1,337 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "vanderlande.com/appstack/k8s-provisioner/test/utils" +) + +// namespace where the project is deployed in +const namespace = "k8s-provisioner-system" + +// serviceAccountName created for the project +const serviceAccountName = "k8s-provisioner-controller-manager" + +// metricsServiceName is the name of the metrics service of the project +const metricsServiceName = "k8s-provisioner-controller-manager-metrics-service" + +// metricsRoleBindingName is the name of the RBAC that will be created to allow get the metrics data +const metricsRoleBindingName = "k8s-provisioner-metrics-binding" + +var _ = Describe("Manager", Ordered, func() { + var controllerPodName string + + // Before running the tests, set up the environment by creating the namespace, + // enforce the restricted security policy to the namespace, installing CRDs, + // and deploying the controller. + BeforeAll(func() { + By("creating manager namespace") + cmd := exec.Command("kubectl", "create", "ns", namespace) + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create namespace") + + By("labeling the namespace to enforce the restricted security policy") + cmd = exec.Command("kubectl", "label", "--overwrite", "ns", namespace, + "pod-security.kubernetes.io/enforce=restricted") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to label namespace with restricted policy") + + By("installing CRDs") + cmd = exec.Command("make", "install") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs") + + By("deploying the controller-manager") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage)) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager") + }) + + // After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs, + // and deleting the namespace. + AfterAll(func() { + By("cleaning up the curl pod for metrics") + cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace) + _, _ = utils.Run(cmd) + + By("undeploying the controller-manager") + cmd = exec.Command("make", "undeploy") + _, _ = utils.Run(cmd) + + By("uninstalling CRDs") + cmd = exec.Command("make", "uninstall") + _, _ = utils.Run(cmd) + + By("removing manager namespace") + cmd = exec.Command("kubectl", "delete", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + // After each test, check for failures and collect logs, events, + // and pod descriptions for debugging. 
+ AfterEach(func() { + specReport := CurrentSpecReport() + if specReport.Failed() { + By("Fetching controller manager pod logs") + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) + controllerLogs, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err) + } + + By("Fetching Kubernetes events") + cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp") + eventsOutput, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err) + } + + By("Fetching curl-metrics logs") + cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) + metricsOutput, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err) + } + + By("Fetching controller manager pod description") + cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace) + podDescription, err := utils.Run(cmd) + if err == nil { + fmt.Println("Pod description:\n", podDescription) + } else { + fmt.Println("Failed to describe controller pod") + } + } + }) + + SetDefaultEventuallyTimeout(2 * time.Minute) + SetDefaultEventuallyPollingInterval(time.Second) + + Context("Manager", func() { + It("should run successfully", func() { + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func(g Gomega) { + // Get the name of the controller-manager pod + cmd := exec.Command("kubectl", "get", + "pods", "-l", "control-plane=controller-manager", + "-o", "go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", namespace, + ) + + podOutput, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information") + podNames := utils.GetNonEmptyLines(podOutput) + g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running") + controllerPodName = podNames[0] + g.Expect(controllerPodName).To(ContainSubstring("controller-manager")) + + // Validate the pod's status + cmd = exec.Command("kubectl", "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", namespace, + ) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status") + } + Eventually(verifyControllerUp).Should(Succeed()) + }) + + It("should ensure the metrics endpoint is serving metrics", func() { + By("creating a ClusterRoleBinding for the service account to allow access to metrics") + cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName, + "--clusterrole=k8s-provisioner-metrics-reader", + fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName), + ) + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding") + + By("validating that the metrics service is available") + cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") + + By("getting the service account token") + token, 
err := serviceAccountToken() + Expect(err).NotTo(HaveOccurred()) + Expect(token).NotTo(BeEmpty()) + + By("ensuring the controller pod is ready") + verifyControllerPodReady := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "pod", controllerPodName, "-n", namespace, + "-o", "jsonpath={.status.conditions[?(@.type=='Ready')].status}") + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("True"), "Controller pod not ready") + } + Eventually(verifyControllerPodReady, 3*time.Minute, time.Second).Should(Succeed()) + + By("verifying that the controller manager is serving the metrics server") + verifyMetricsServerStarted := func(g Gomega) { + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(ContainSubstring("Serving metrics server"), + "Metrics server not yet started") + } + Eventually(verifyMetricsServerStarted, 3*time.Minute, time.Second).Should(Succeed()) + + // +kubebuilder:scaffold:e2e-metrics-webhooks-readiness + + By("creating the curl-metrics pod to access the metrics endpoint") + cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", + "--namespace", namespace, + "--image=curlimages/curl:latest", + "--overrides", + fmt.Sprintf(`{ + "spec": { + "containers": [{ + "name": "curl", + "image": "curlimages/curl:latest", + "command": ["/bin/sh", "-c"], + "args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"], + "securityContext": { + "readOnlyRootFilesystem": true, + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": ["ALL"] + }, + "runAsNonRoot": true, + "runAsUser": 1000, + "seccompProfile": { + "type": "RuntimeDefault" + } + } + }], + "serviceAccountName": "%s" + } + }`, token, metricsServiceName, namespace, serviceAccountName)) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") + + By("waiting for the curl-metrics pod to complete.") + verifyCurlUp := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "pods", "curl-metrics", + "-o", "jsonpath={.status.phase}", + "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status") + } + Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) + + By("getting the metrics by checking curl-metrics logs") + verifyMetricsAvailable := func(g Gomega) { + metricsOutput, err := getMetricsOutput() + g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") + g.Expect(metricsOutput).NotTo(BeEmpty()) + g.Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK")) + } + Eventually(verifyMetricsAvailable, 2*time.Minute).Should(Succeed()) + }) + + // +kubebuilder:scaffold:e2e-webhooks-checks + + // TODO: Customize the e2e test suite with scenarios specific to your project. + // Consider applying sample/CR(s) and check their status and/or verifying + // the reconciliation by using the metrics, i.e.: + // metricsOutput, err := getMetricsOutput() + // Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") + // Expect(metricsOutput).To(ContainSubstring( + // fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`, + // strings.ToLower(), + // )) + }) +}) + +// serviceAccountToken returns a token for the specified service account in the given namespace. 
+// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request
+// and parsing the resulting token from the API response.
+func serviceAccountToken() (string, error) {
+	const tokenRequestRawString = `{
+		"apiVersion": "authentication.k8s.io/v1",
+		"kind": "TokenRequest"
+	}`
+
+	// Temporary file to store the token request
+	secretName := fmt.Sprintf("%s-token-request", serviceAccountName)
+	tokenRequestFile := filepath.Join("/tmp", secretName)
+	err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644))
+	if err != nil {
+		return "", err
+	}
+
+	var out string
+	verifyTokenCreation := func(g Gomega) {
+		// Execute kubectl command to create the token
+		cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf(
+			"/api/v1/namespaces/%s/serviceaccounts/%s/token",
+			namespace,
+			serviceAccountName,
+		), "-f", tokenRequestFile)
+
+		output, err := cmd.CombinedOutput()
+		g.Expect(err).NotTo(HaveOccurred())
+
+		// Parse the JSON output to extract the token
+		var token tokenRequest
+		err = json.Unmarshal(output, &token)
+		g.Expect(err).NotTo(HaveOccurred())
+
+		out = token.Status.Token
+	}
+	Eventually(verifyTokenCreation).Should(Succeed())
+
+	// The outer err can only be nil at this point (the closure shadows it).
+	return out, nil
+}
+
+// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint.
+func getMetricsOutput() (string, error) {
+	By("getting the curl-metrics logs")
+	cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
+	return utils.Run(cmd)
+}
+
+// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response,
+// containing only the token field that we need to extract.
+type tokenRequest struct {
+	Status struct {
+		Token string `json:"token"`
+	} `json:"status"`
+}
diff --git a/deploy/k8s-provisioner/test/local/cluster.yaml b/deploy/k8s-provisioner/test/local/cluster.yaml
new file mode 100644
index 0000000..2f69d74
--- /dev/null
+++ b/deploy/k8s-provisioner/test/local/cluster.yaml
@@ -0,0 +1,22 @@
+apiVersion: k8sprovisioner.appstack.io/v1alpha1
+kind: Cluster
+metadata:
+  name: test-cluster-01
+  namespace: fleet-default
+spec:
+  infraRef: "dev-environment-v1" # Must match the Infra name above
+  # 1. Lifecycle
+  kubernetesVersion: "v1.33.5+rke2r1" # Overrides the template default
+  # 2. Topology: Control Plane
+  # false = 1 Node (Using the Standard 4CPU/8GB from _defaults)
+  # true = 3 Nodes (Using the Standard 4CPU/8GB from _defaults)
+  controlPlaneHA: false
+  # 3. Topology: Workers
+  # These use the VM settings (Network, User, etc.) from Infra,
+  # but the Hardware Specs defined below.
+  workerPools:
+    - name: "app-workers"
+      quantity: 1
+      cpuCores: 4 # Custom Sizing
+      memoryGb: 16 # Custom Sizing
+      diskGb: 60 # Custom Sizing
\ No newline at end of file
diff --git a/deploy/k8s-provisioner/test/local/cluster2.yaml b/deploy/k8s-provisioner/test/local/cluster2.yaml
new file mode 100644
index 0000000..ba48c18
--- /dev/null
+++ b/deploy/k8s-provisioner/test/local/cluster2.yaml
@@ -0,0 +1,22 @@
+apiVersion: k8sprovisioner.appstack.io/v1alpha1
+kind: Cluster
+metadata:
+  name: test-cluster-02
+  namespace: fleet-default
+spec:
+  infraRef: "dev-environment-v1" # Must match the Infra name above
+  # 1. Lifecycle
+  kubernetesVersion: "v1.32.10+rke2r1" # Overrides the template default
+  # 2. Topology: Control Plane
+  # false = 1 Node (Using the Standard 4CPU/8GB from _defaults)
+  # true = 3 Nodes (Using the Standard 4CPU/8GB from _defaults)
+  controlPlaneHA: false
+  # 3. 
Topology: Workers + # These uses the VM settings (Network, User, etc) from Infra, + # but the Hardware Specs defined below. + workerPools: + - name: "tech-session-workers" + quantity: 1 + cpuCores: 4 # Custom Sizing + memoryGb: 16 # Custom Sizing + diskGb: 60 # Custom Sizing \ No newline at end of file diff --git a/deploy/k8s-provisioner/test/local/infra.yaml b/deploy/k8s-provisioner/test/local/infra.yaml new file mode 100644 index 0000000..d67288c --- /dev/null +++ b/deploy/k8s-provisioner/test/local/infra.yaml @@ -0,0 +1,16 @@ +apiVersion: k8sprovisioner.appstack.io/v1alpha1 +kind: Infra +metadata: + name: dev-environment-v1 + namespace: fleet-default +spec: + # 1. Integration Credentials + cloudCredentialSecret: "cc-mrklm" # Matches your values.yaml example + rancherUrl: "https://rancher-mgmt.product.lan" + harvesterUrl: "https://172.27.27.190:6443" + + # 2. VM Environment Defaults + vmNamespace: "vanderlande" + imageName: "vanderlande/image-qhtpc" # Default image for this environment + networkName: "vanderlande/vm-lan" + sshUser: "rancher" diff --git a/deploy/k8s-provisioner/test/local/local-vsphere-tst.yaml b/deploy/k8s-provisioner/test/local/local-vsphere-tst.yaml new file mode 100644 index 0000000..970db21 --- /dev/null +++ b/deploy/k8s-provisioner/test/local/local-vsphere-tst.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: Config +clusters: +- name: "local" + cluster: + server: "https://rancher.tst.vanderlande.com/k8s/clusters/local" + certificate-authority-data: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlFaXpDQ\ + 0EzT2dBd0lCQWdJUUNRN294ZDViK21MU3JpLzNDWHhJVnpBTkJna3Foa2lHOXcwQkFRc0ZBREJoD\ + QpNUXN3Q1FZRFZRUUdFd0pWVXpFVk1CTUdBMVVFQ2hNTVJHbG5hVU5sY25RZ1NXNWpNUmt3RndZR\ + FZRUUxFeEIzDQpkM2N1WkdsbmFXTmxjblF1WTI5dE1TQXdIZ1lEVlFRREV4ZEVhV2RwUTJWeWRDQ\ + khiRzlpWVd3Z1VtOXZkQ0JIDQpNakFlRncweE56RXhNREl4TWpJME1qVmFGdzB5TnpFeE1ESXhNa\ + kkwTWpWYU1GNHhDekFKQmdOVkJBWVRBbFZUDQpNUlV3RXdZRFZRUUtFd3hFYVdkcFEyVnlkQ0JKY\ + m1NeEdUQVhCZ05WQkFzVEVIZDNkeTVrYVdkcFkyVnlkQzVqDQpiMjB4SFRBYkJnTlZCQU1URkZSb\ + 1lYZDBaU0JVVEZNZ1VsTkJJRU5CSUVjeE1JSUJJakFOQmdrcWhraUc5dzBCDQpBUUVGQUFPQ0FRO\ + EFNSUlCQ2dLQ0FRRUF4am5nbVBoVmV0QzBiL296YllKZHpPQlVBMXNNb2c0NzAzMGNBUCtQDQoyM\ + 0FOVU44Z3JYRUNMOE5oREVGNEYxUjl0TDB3WTBtY3pIYVIwYTdsWWFubHh0d1dvMXMydUdubnlEc\ + zZtT0NzDQo2NmV3MnczWUVUcjZUYjE0eGdqcHUxZ0dGdEFlZXdhaWtPOUZ1ZDhoeEdKVFN3bjh4Z\ + U5rZktWV3BEMkw0dkZODQozNkZOZ3hlaWxLNmFFNHlrZ0dBek5sb2tUcDZoTk9MQVlwRHlTZExBU\ + Et6dUpTUTdKQ0VaNk8rU0RLeXdJZFhMDQpvTVRucHh1QktHU0c4OE5XVG8zQ0hDT0dtUUVDaWEye\ + XFkUERqZ0xxbkVpWU5qd1FMOHVNcWo4ck92bE1ndmlCDQpjSEE3eHR5KzcvdVlMTjZaUzdWcTEvR\ + i9sVmhWT2Y1ZWo2alpkbUI4NXN6RmJRSURBUUFCbzRJQlFEQ0NBVHd3DQpIUVlEVlIwT0JCWUVGS\ + 1dNL2pMTTZ3OHMxQm5HQ0xnQUpJaGR3OFczTUI4R0ExVWRJd1FZTUJhQUZFNGlWQ0FZDQpsZWJqY\ + nVZUCt2cTVFdTBHRjQ4NU1BNEdBMVVkRHdFQi93UUVBd0lCaGpBZEJnTlZIU1VFRmpBVUJnZ3JCZ\ + 0VGDQpCUWNEQVFZSUt3WUJCUVVIQXdJd0VnWURWUjBUQVFIL0JBZ3dCZ0VCL3dJQkFEQTBCZ2dyQ\ + mdFRkJRY0JBUVFvDQpNQ1l3SkFZSUt3WUJCUVVITUFHR0dHaDBkSEE2THk5dlkzTndMbVJwWjJsa\ + lpYSjBMbU52YlRCQ0JnTlZIUjhFDQpPekE1TURlZ05hQXpoakZvZEhSd09pOHZZM0pzTXk1a2FXZ\ + HBZMlZ5ZEM1amIyMHZSR2xuYVVObGNuUkhiRzlpDQpZV3hTYjI5MFJ6SXVZM0pzTUQwR0ExVWRJQ\ + VEyTURRd01nWUVWUjBnQURBcU1DZ0dDQ3NHQVFVRkJ3SUJGaHhvDQpkSFJ3Y3pvdkwzZDNkeTVrY\ + VdkcFkyVnlkQzVqYjIwdlExQlRNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUM2DQprbTBLQTRzV\ + GIyVllwRUJtL3VMMkhML3BaWDlCN0wvaGJKNE5jb0JlN1Y1Nm9DbnQ3YWVJbzhzTWpDUldUQ1daD\ + QpEMWRZMCsyS1pPQzFkS2o4ZDFWWFhBdG5qeXRERHVQUGY2L2lvdzBtWVFUTy9HQWcvTUx5TDZDR\ + G0zRnpEQjhWDQp0c0gvYWVNZ1A2cGdEMVhRcXoraGFEbmZuSlRLQnV4aGNwbngzQWRibGV1ZS9Rb\ + 
lBmMWhIWWE4TCtSdjhQaTVVDQpoNFY5RndIT2ZwaGRNWE94aTE0T3Ftc2lUYmM1Y09zOS91dWtIK\ + 1lWc3VGZFdUbmE2SVZ3MXFoK3RFdHlIMTZSDQp2bWk3cGtxeVpZVUxPUE1JRTdhdnJsalZWQlp1a\ + Wt3QVJ0WTh0Q1ZWNlBwOWwzVmVhZ0JxYjJmZmdxTkp0M0MwDQpUWU5ZUUkrQlhHMVIxY0FCbG9sZ\ + A0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==" + +users: +- name: "local" + user: + token: "kubeconfig-u-co2u74xluwwnlqq:r55zjkxnfdzzqstkvmx7cqrzvxvb8x4ks6txr27xpp4fkccvl6fxgr" + + +contexts: +- name: "local" + context: + user: "local" + cluster: "local" + +current-context: "local" diff --git a/deploy/k8s-provisioner/test/local/local.yaml b/deploy/k8s-provisioner/test/local/local.yaml new file mode 100644 index 0000000..2e1c639 --- /dev/null +++ b/deploy/k8s-provisioner/test/local/local.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Config +clusters: +- name: "local" + cluster: + server: "https://rancher-mgmt.product.lan/k8s/clusters/local" + certificate-authority-data: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJ2VENDQ\ + VdPZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQkdNUnd3R2dZRFZRUUtFeE5rZVc1aGJXbGoKY\ + kdsemRHVnVaWEl0YjNKbk1TWXdKQVlEVlFRRERCMWtlVzVoYldsamJHbHpkR1Z1WlhJdFkyRkFNV\ + GMyTkRFegpPVE0yTVRBZUZ3MHlOVEV4TWpZd05qUXlOREZhRncwek5URXhNalF3TmpReU5ERmFNR\ + Vl4SERBYUJnTlZCQW9UCkUyUjVibUZ0YVdOc2FYTjBaVzVsY2kxdmNtY3hKakFrQmdOVkJBTU1IV\ + 1I1Ym1GdGFXTnNhWE4wWlc1bGNpMWoKWVVBeE56WTBNVE01TXpZeE1Ga3dFd1lIS29aSXpqMENBU\ + VlJS29aSXpqMERBUWNEUWdBRWdWUytveHU0RWpnVQpueCt4VEJUbTVzY0s5dHA4Nk5LS1cvenU2e\ + DZYL0k1L3dTWXVNR1FjWTVKenNNbmlGM1JXQzdSQ2RYYi9yU1d2CkxGb1JwYVgzTEtOQ01FQXdEZ\ + 1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWUQKVlIwT0JCWUVGT\ + 1VkcnZLWkF6bWxFLzZpVTZjQkM1WlByQTR6TUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDSVFEYgpNN\ + GQ5THE0TzZsNTNlc1A3bHdsWVVOenpEMzNJSXFCeFc4YTQxeWJkaUFJZ1piK0c1MVdZejFkd1lRc\ + 3lZd1pTClRia0prUk41d2lFTUFsWVZiM3NlTS9rPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t" + +users: +- name: "local" + user: + token: "kubeconfig-user-nlnddg6rkq:q9sfb4486q2v8pvlfvnhcn6s4nb5sbfzkgwvfxdfklmr642hkcb7ng" + + +contexts: +- name: "local" + context: + user: "local" + cluster: "local" + +current-context: "local" diff --git a/deploy/k8s-provisioner/test/utils/utils.go b/deploy/k8s-provisioner/test/utils/utils.go new file mode 100644 index 0000000..495bc7f --- /dev/null +++ b/deploy/k8s-provisioner/test/utils/utils.go @@ -0,0 +1,226 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strings" + + . 
"github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck +) + +const ( + certmanagerVersion = "v1.19.1" + certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml" + + defaultKindBinary = "kind" + defaultKindCluster = "kind" +) + +func warnError(err error) { + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) (string, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err) + } + + return string(output), nil +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } + + // Delete leftover leases in kube-system (not cleaned by default) + kubeSystemLeases := []string{ + "cert-manager-cainjector-leader-election", + "cert-manager-controller", + } + for _, lease := range kubeSystemLeases { + cmd = exec.Command("kubectl", "delete", "lease", lease, + "-n", "kube-system", "--ignore-not-found", "--force", "--grace-period=0") + if _, err := Run(cmd); err != nil { + warnError(err) + } + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "apply", "-f", url) + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. + cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + _, err := Run(cmd) + return err +} + +// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed +// by verifying the existence of key CRDs related to Cert Manager. 
+func IsCertManagerCRDsInstalled() bool {
+	// List of common Cert Manager CRDs
+	certManagerCRDs := []string{
+		"certificates.cert-manager.io",
+		"issuers.cert-manager.io",
+		"clusterissuers.cert-manager.io",
+		"certificaterequests.cert-manager.io",
+		"orders.acme.cert-manager.io",
+		"challenges.acme.cert-manager.io",
+	}
+
+	// Execute the kubectl command to get all CRDs
+	cmd := exec.Command("kubectl", "get", "crds")
+	output, err := Run(cmd)
+	if err != nil {
+		return false
+	}
+
+	// Check if any of the Cert Manager CRDs are present
+	crdList := GetNonEmptyLines(output)
+	for _, crd := range certManagerCRDs {
+		for _, line := range crdList {
+			if strings.Contains(line, crd) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// LoadImageToKindClusterWithName loads a local docker image to the kind cluster
+func LoadImageToKindClusterWithName(name string) error {
+	cluster := defaultKindCluster
+	if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
+		cluster = v
+	}
+	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
+	kindBinary := defaultKindBinary
+	if v, ok := os.LookupEnv("KIND"); ok {
+		kindBinary = v
+	}
+	cmd := exec.Command(kindBinary, kindOptions...)
+	_, err := Run(cmd)
+	return err
+}
+
+// GetNonEmptyLines converts the given command output string into individual lines,
+// ignoring any empty elements in it.
+func GetNonEmptyLines(output string) []string {
+	var res []string
+	elements := strings.Split(output, "\n")
+	for _, element := range elements {
+		if element != "" {
+			res = append(res, element)
+		}
+	}
+
+	return res
+}
+
+// GetProjectDir will return the directory where the project is
+func GetProjectDir() (string, error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return wd, fmt.Errorf("failed to get current working directory: %w", err)
+	}
+	wd = strings.ReplaceAll(wd, "/test/e2e", "")
+	return wd, nil
+}
+
+// UncommentCode searches for target in the file and removes the comment prefix
+// of the target content. The target content may span multiple lines.
+func UncommentCode(filename, target, prefix string) error {
+	// false positive
+	// nolint:gosec
+	content, err := os.ReadFile(filename)
+	if err != nil {
+		return fmt.Errorf("failed to read file %q: %w", filename, err)
+	}
+	strContent := string(content)
+
+	idx := strings.Index(strContent, target)
+	if idx < 0 {
+		return fmt.Errorf("unable to find the code %q to be uncommented", target)
+	}
+
+	out := new(bytes.Buffer)
+	_, err = out.Write(content[:idx])
+	if err != nil {
+		return fmt.Errorf("failed to write to output: %w", err)
+	}
+
+	scanner := bufio.NewScanner(bytes.NewBufferString(target))
+	if !scanner.Scan() {
+		return nil
+	}
+	for {
+		if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil {
+			return fmt.Errorf("failed to write to output: %w", err)
+		}
+		// Avoid writing a newline in case the previous line was the last in target.
+		if !scanner.Scan() {
+			break
+		}
+		if _, err = out.WriteString("\n"); err != nil {
+			return fmt.Errorf("failed to write to output: %w", err)
+		}
+	}
+
+	if _, err = out.Write(content[idx+len(target):]); err != nil {
+		return fmt.Errorf("failed to write to output: %w", err)
+	}
+
+	// false positive
+	// nolint:gosec
+	if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil {
+		return fmt.Errorf("failed to write file %q: %w", filename, err)
+	}
+
+	return nil
+}
diff --git a/deploy/rancher/README.md b/deploy/rancher/README.md
new file mode 100644
index 0000000..7d7d5be
--- /dev/null
+++ b/deploy/rancher/README.md
@@ -0,0 +1,31 @@
+# Rancher deployment overview
+
+## Prerequisites
+
+* The IP address for the LoadBalancer must be from the same subnet as the Harvester node IP, i.e. 172.27.27.0/24, for the LB in the current implementation to work.
+* Due to environment firewall restrictions, the _https://get.rke2.io_ install source does not work. All such instances must be replaced with the alternative _install.sh_ download location _https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh_.
+
+
+
+## Helm chart deployment
+
+An example Helm chart is available in the following [folder](./helm/rke2).
+Two values files are prepared, for DHCP and static IP allocation:
+* [rancher_values_dhcp.yaml](./helm/rancher_values_dhcp.yaml)
+* [rancher_values_static.yaml](./helm/rancher_values_static.yaml)
+
+
+## Fleet bundle deployment
+
+Two Fleet bundles were prepared based on the Helm chart:
+* [mgmt-dhcp.yaml](./fleet/mgmt-dhcp.yaml)
+* [mgmt-static.yaml](./fleet/mgmt-static.yaml)
+
+## CAPI based deployment
+
+**Notes:**
+* Not yet updated to the R&D environment or tested!
+* There are compatibility issues between vcluster v0.30 and Harvester.
+
+Harvester add-on based [deployment](./capi/addon.yaml).
+Helm CRD based [deployment](./capi/helmchart.yaml).
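+### Example usage
+
+A minimal sketch of how the Helm and Fleet paths above could be invoked, assuming
+commands are run from `deploy/rancher/` and using `rke2-mgmt` as an illustrative
+release name (neither is prescribed by this repository):
+
+```sh
+# Helm chart deployment with the prepared DHCP values file
+helm install rke2-mgmt ./helm/rke2 -f ./helm/rancher_values_dhcp.yaml
+
+# Fleet bundle deployment: the Bundle manifest already carries its target
+# namespace (fleet-local), so a plain apply on the local cluster is enough
+kubectl apply -f ./fleet/mgmt-dhcp.yaml
+```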
diff --git a/deploy/rancher/capi/addon.yaml b/deploy/rancher/capi/addon.yaml new file mode 100644 index 0000000..555e2b6 --- /dev/null +++ b/deploy/rancher/capi/addon.yaml @@ -0,0 +1,265 @@ +apiVersion: harvesterhci.io/v1beta1 +kind: Addon +metadata: + labels: + addon.harvesterhci.io/experimental: "true" + name: rancher-embedded + namespace: rancher-embedded +spec: + chart: vcluster + version: 0.19.0 + enabled: false + repo: https://charts.loft.sh + valuesContent: |- + vm_network_name: "k8s-network" + ssh_keypair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil" + vm_image_name: "ubuntu-22.04" + vm_default_user: "ubuntu" + harvester_vip: "172.27.27.40" + rancher_url: "rancher-mgmt.product.lan" + harvester_kubeconfig_b64: "YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIG5hbWU6ICJsb2NhbCIKICBjbHVzdGVyOgogICAgc2VydmVyOiAiaHR0cHM6Ly8xNzIuMjcuMjcuMTkwL2s4cy9jbHVzdGVycy9sb2NhbCIKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiAiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSjJWRU5EUVwKICAgICAgVmRQWjBGM1NVSkJaMGxDUVVSQlMwSm5aM0ZvYTJwUFVGRlJSRUZxUWtkTlVuZDNSMmRaUkZaUlVVdEZlRTVyWlZjMWFHSlhiR29LWVwKICAgICAga2RzZW1SSFZuVmFXRWwwWWpOS2JrMVRXWGRLUVZsRVZsRlJSRVJDTVd0bFZ6Vm9ZbGRzYW1KSGJIcGtSMVoxV2xoSmRGa3lSa0ZOVlwKICAgICAgR014VDFSVmR3cFBSR2MxVFZSQlpVWjNNSGxPVkVWM1RVUk5lRTVxU1RSTlZFWmhSbmN3ZWs1VVJYZE5SRVY0VG1wSk5FMVVSbUZOUlwKICAgICAgVmw0U0VSQllVSm5UbFpDUVc5VUNrVXlValZpYlVaMFlWZE9jMkZZVGpCYVZ6VnNZMmt4ZG1OdFkzaEtha0ZyUW1kT1ZrSkJUVTFJVlwKICAgICAgMUkxWW0xR2RHRlhUbk5oV0U0d1dsYzFiR05wTVdvS1dWVkJlRTU2VlRWT1ZFRTBUMFJyZUUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVwKICAgICAgVmxKUzI5YVNYcHFNRVJCVVdORVVXZEJSVUZWVlU0eFdtUmxURlY2UmdwTWFtSk1Wbk5TT1ZNMGJTdFRTWE5XWlVOa1JVcHVNVGhRYVwKICAgICAgWHBUYm1jMk5rNXhMMWhHVkZaT2RGRnFMMEl3T1hCR01GTXdUVFpMZDJSbmFHUldWM1Y1Q25vMWJFTmlSVzlVVkRaT1EwMUZRWGRFWlwKICAgICAgMWxFVmxJd1VFRlJTQzlDUVZGRVFXZExhMDFCT0VkQk1WVmtSWGRGUWk5M1VVWk5RVTFDUVdZNGQwaFJXVVFLVmxJd1QwSkNXVVZHU1wKICAgICAgRXd2Um5Ga05GRXJaamhpTlhkTFJtSjJUSEpwVTJrMWRtVnpUVUZ2UjBORGNVZFRUVFE1UWtGTlEwRXdaMEZOUlZWRFNVUk5XZ3BVUlwKICAgICAgWFl6VmpjM04zRjZja2RCTDBjNVdVUmxjMlUwVkdaNllWRlhiVmh3UWxWTE9FRm5XWFZJUVdsRlFXeHZaVEpNTVM5RU9VZE1VRGRXU1wKICAgICAgMU13TWxObUNsUnRRbHBxT1d4WVNVeFBSWEJJZDBkR05tSk1WR3hqUFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0IgoKdXNlcnM6Ci0gbmFtZTogImxvY2FsIgogIHVzZXI6CiAgICB0b2tlbjogImt1YmVjb25maWctdXNlci1remo5OWJubmdmOmd4Nm1kdDVmMjlzZjRsY3R2Zm44Mnp4c3NsOXhydzJtNjg1NDhnOWpsN3psbHR2Nm00dHB6ZiIKCgpjb250ZXh0czoKLSBuYW1lOiAibG9jYWwiCiAgY29udGV4dDoKICAgIHVzZXI6ICJsb2NhbCIKICAgIGNsdXN0ZXI6ICJsb2NhbCIKCmN1cnJlbnQtY29udGV4dDogImxvY2FsIgo=" + + vcluster: + image: rancher/k3s:v1.30.6-k3s1 + sync: + ingresses: + enabled: "true" + + init: + manifestsTemplate: |- + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cert-manager + spec: + chart: cert-manager + createNamespace: true + version: v1.13.0 + repo: https://charts.jetstack.io + targetNamespace: cert-manager + valuesContent: | + installCRDs: true + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: bootstrap-cluster + spec: + chart: cluster-api-operator + repo: https://kubernetes-sigs.github.io/cluster-api-operator + version: v0.14.0 + valuesContent: | + cert-manager: + enabled: true + bootstrap: rke2 + controlPlane: rke2 + --- + apiVersion: v1 + kind: Namespace + metadata: + name: caphv-system + --- + apiVersion: operator.cluster.x-k8s.io/v1alpha2 + kind: InfrastructureProvider + metadata: + name: harvester + namespace: caphv-system + spec: + version: v0.1.4 + fetchConfig: + url: 
https://github.com/rancher-sandbox/cluster-api-provider-harvester/releases/download/v0.1.4/components.yaml + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + labels: + ccm: external + cluster.x-k8s.io/cluster-name: rke2-mgmt + cni: external + csi: external + name: rke2-mgmt + namespace: default + spec: + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: RKE2ControlPlane + name: rke2-mgmt-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterCluster + name: rke2-mgmt-hv + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterCluster + metadata: + name: rke2-mgmt-hv + namespace: default + spec: + identitySecret: + name: hv-identity-secret + namespace: default + loadBalancerConfig: + ipamType: dhcp + listeners: + - backendPort: 9345 + name: rke2-server + port: 9345 + protocol: TCP + - backendPort: 443 + name: rke2-ingress + port: 443 + protocol: TCP + server: {{ .Values.harvester_vip }} + targetNamespace: default + --- + apiVersion: v1 + data: + kubeconfig: {{ .Values.harvester_kubeconfig_b64 }} + kind: Secret + metadata: + name: hv-identity-secret + namespace: default + --- + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: RKE2ControlPlane + metadata: + name: rke2-mgmt-control-plane + namespace: default + spec: + agentConfig: + version: v1.29.6+rke2r1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + name: rke2-mgmt-cp-machine + namespace: default + replicas: 3 + serverConfig: + cni: canal + --- + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1 + kind: RKE2ConfigTemplate + metadata: + name: rke2-mgmt-worker + namespace: default + spec: + template: + spec: + agentConfig: + version: v1.29.6+rke2r1 + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachineDeployment + metadata: + name: rke2-mgmt-workers + namespace: default + spec: + clusterName: rke2-mgmt + replicas: 0 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1 + kind: RKE2ConfigTemplate + name: rke2-mgmt-worker + namespace: default + clusterName: rke2-mgmt + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + name: rke2-mgmt-wk-machine + namespace: default + version: v1.29.6+rke2r1 + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + metadata: + name: rke2-mgmt-wk-machine + namespace: default + spec: + template: + spec: + cpu: 2 + memory: 16Gi + networks: + - {{ .Values.vm_network_name }} + sshKeyPair: default/{{ .Values.ssh_keypair }} + sshUser: {{ .Values.vm_default_user }} + volumes: + - bootOrder: 0 + imageName: default/{{ .Values.vm_image_name }} + volumeSize: 40Gi + volumeType: image + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + metadata: + name: rke2-mgmt-cp-machine + namespace: default + spec: + template: + spec: + cpu: 2 + memory: 16Gi + networks: + - {{ .Values.vm_network_name }} + sshKeyPair: default/{{ .Values.ssh_keypair }} + sshUser: {{ .Values.vm_default_user }} + volumes: + - bootOrder: 0 + imageName: default/{{ .Values.vm_image_name }} + volumeSize: 40Gi + volumeType: image + --- + apiVersion: addons.cluster.x-k8s.io/v1beta1 + kind: ClusterResourceSet + metadata: + labels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + name: rke2-mgmt-rancher-crs-0 + namespace: 
default + spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + resources: + - kind: Secret + name: rancher-namespace + - kind: Secret + name: rancher-helmchart + - kind: Secret + name: certmanager-helmchart + strategy: Reconcile + --- + apiVersion: v1 + kind: Secret + metadata: + name: certmanager-helmchart + namespace: default + stringData: + data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: cert-manager\n + \ namespace: default \nspec:\n bootstrap: true\n targetNamespace: cert-manager\n + \ createNamespace: true\n valuesContent: |-\n securityContext:\n runAsNonRoot: + true\n crds:\n enabled: true\n version: v1.16.1\n repo: https://charts.jetstack.io\n + \ chart: cert-manager\n" + type: addons.cluster.x-k8s.io/resource-set + --- + apiVersion: v1 + kind: Secret + metadata: + name: rancher-helmchart + namespace: default + stringData: + data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: rancher\n + \ namespace: default \nspec:\n bootstrap: false\n targetNamespace: cattle-system\n + \ createNamespace: true\n set:\n hostname: {{ .Values.rancher_url }}\n + \ replicas: 3\n bootstrapPassword: admin\n valuesContent: |-\n global:\n + \ cattle:\n psp:\n enabled: false\n ingress:\n tls:\n + \ source: rancher\n repo: https://releases.rancher.com/server-charts/stable\n + \ chart: rancher\n version: v2.9.1\n" + type: addons.cluster.x-k8s.io/resource-set diff --git a/deploy/rancher/capi/harvester-addon.yaml b/deploy/rancher/capi/harvester-addon.yaml new file mode 100644 index 0000000..f736c4f --- /dev/null +++ b/deploy/rancher/capi/harvester-addon.yaml @@ -0,0 +1,90 @@ +apiVersion: harvesterhci.io/v1beta1 +kind: Addon +metadata: + labels: + addon.harvesterhci.io/experimental: 'true' + name: temp-vlcuster-fix + namespace: temp-vlcuster-fix +spec: + chart: vcluster + enabled: true + repo: https://charts.loft.sh + valuesContent: |- + serviceCIDR: 10.53.0.0/16 + controlPlane: + distro: + k3s: + resources: + limits: + memory: 16096Mi + cpu: 8000m + enabled: true + imagePullPolicy: IfNotPresent + image: + tag: v1.33.4-k3s1 + repository: rancher/k3s + sync: + toHost: + ingresses: + enabled: true + experimental: + deploy: + vcluster: + manifests: |- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cert-manager + labels: + certmanager.k8s.io/disable-validation: "true" + helm: + - chart: + name: cert-manager + repo: https://charts.jetstack.io + version: v1.8.0 + release: + name: cert-manager + namespace: cert-manager + values: |- + installCRDs: true + + - chart: + name: rancher + repo: https://releases.rancher.com/server-charts/latest + version: v2.12.0 + release: + name: rancher + namespace: cattle-system + values: |- + hostname: rancher.product.lan + replicas: 1 + bootstrapPassword: ce6XxaBTv9pHpGln + rancherImage: rancher/rancher + ingress: + tls: + source: rancher + global: + cattle: + psp: + enabled: "false" + extraEnv: + - name: CATTLE_AGENT_IMAGE + value: rancher/rancher-agent:v2.12.0 + version: v0.28.0 +status: + conditions: + - lastUpdateTime: '2025-10-24T13:24:37Z' + status: 'True' + type: Completed + - lastUpdateTime: '2025-10-24T13:24:37Z' + status: 'False' + type: InProgress + - lastUpdateTime: '2025-10-24T13:23:08Z' + status: 'False' + type: OperationFailed + status: AddonDeploySuccessful diff --git a/deploy/rancher/capi/helmchart.yaml b/deploy/rancher/capi/helmchart.yaml new file mode 100644 index 0000000..0072763 --- /dev/null +++ 
b/deploy/rancher/capi/helmchart.yaml @@ -0,0 +1,294 @@ +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: rancher-embedded +spec: + chart: vcluster + version: 0.30.1 + repo: https://charts.loft.sh + valuesContent: | + # vm_network_name: ${VM_NETWORK} + # ssh_keypair: ${VM_SSH_KEYPAIR} + # vm_image_name: ${VM_IMAGE_NAME} + # vm_default_user: ${VM_DEFAULT_USER} + # harvester_vip: ${HARVESTER_VIP} + # rancher_url: ${RANCHER_URL} + # harvester_kubeconfig_b64: ${HARVESTER_KUBECONFIG_B64} + #external: + + controlPlane: + distro: + k3s: + enabled: true + image: + tag: v1.33.5-k3s1 + statefulSet: + scheduling: + podManagementPolicy: OrderedReady + sync: + fromHost: + ingressClasses: + enabled: true + toHost: + ingresses: + enabled: true + + experimental: + + deploy: + vcluster: + + #vm_network_name: "k8s-network" + #ssh_keypair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil" + #vm_image_name: "ubuntu-22.04" + #vm_default_user: "ubuntu" + #harvester_vip: "172.27.27.40" + #rancher_url: "rancher-mgmt.product.lan" + #harvester_kubeconfig_b64: "YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIG5hbWU6ICJsb2NhbCIKICBjbHVzdGVyOgogICAgc2VydmVyOiAiaHR0cHM6Ly8xNzIuMjcuMjcuMTkwL2s4cy9jbHVzdGVycy9sb2NhbCIKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiAiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSjJWRU5EUVwKICAgICAgVmRQWjBGM1NVSkJaMGxDUVVSQlMwSm5aM0ZvYTJwUFVGRlJSRUZxUWtkTlVuZDNSMmRaUkZaUlVVdEZlRTVyWlZjMWFHSlhiR29LWVwKICAgICAga2RzZW1SSFZuVmFXRWwwWWpOS2JrMVRXWGRLUVZsRVZsRlJSRVJDTVd0bFZ6Vm9ZbGRzYW1KSGJIcGtSMVoxV2xoSmRGa3lSa0ZOVlwKICAgICAgR014VDFSVmR3cFBSR2MxVFZSQlpVWjNNSGxPVkVWM1RVUk5lRTVxU1RSTlZFWmhSbmN3ZWs1VVJYZE5SRVY0VG1wSk5FMVVSbUZOUlwKICAgICAgVmw0U0VSQllVSm5UbFpDUVc5VUNrVXlValZpYlVaMFlWZE9jMkZZVGpCYVZ6VnNZMmt4ZG1OdFkzaEtha0ZyUW1kT1ZrSkJUVTFJVlwKICAgICAgMUkxWW0xR2RHRlhUbk5oV0U0d1dsYzFiR05wTVdvS1dWVkJlRTU2VlRWT1ZFRTBUMFJyZUUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVwKICAgICAgVmxKUzI5YVNYcHFNRVJCVVdORVVXZEJSVUZWVlU0eFdtUmxURlY2UmdwTWFtSk1Wbk5TT1ZNMGJTdFRTWE5XWlVOa1JVcHVNVGhRYVwKICAgICAgWHBUYm1jMk5rNXhMMWhHVkZaT2RGRnFMMEl3T1hCR01GTXdUVFpMZDJSbmFHUldWM1Y1Q25vMWJFTmlSVzlVVkRaT1EwMUZRWGRFWlwKICAgICAgMWxFVmxJd1VFRlJTQzlDUVZGRVFXZExhMDFCT0VkQk1WVmtSWGRGUWk5M1VVWk5RVTFDUVdZNGQwaFJXVVFLVmxJd1QwSkNXVVZHU1wKICAgICAgRXd2Um5Ga05GRXJaamhpTlhkTFJtSjJUSEpwVTJrMWRtVnpUVUZ2UjBORGNVZFRUVFE1UWtGTlEwRXdaMEZOUlZWRFNVUk5XZ3BVUlwKICAgICAgWFl6VmpjM04zRjZja2RCTDBjNVdVUmxjMlUwVkdaNllWRlhiVmh3UWxWTE9FRm5XWFZJUVdsRlFXeHZaVEpNTVM5RU9VZE1VRGRXU1wKICAgICAgMU13TWxObUNsUnRRbHBxT1d4WVNVeFBSWEJJZDBkR05tSk1WR3hqUFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0IgoKdXNlcnM6Ci0gbmFtZTogImxvY2FsIgogIHVzZXI6CiAgICB0b2tlbjogImt1YmVjb25maWctdXNlci1remo5OWJubmdmOmd4Nm1kdDVmMjlzZjRsY3R2Zm44Mnp4c3NsOXhydzJtNjg1NDhnOWpsN3psbHR2Nm00dHB6ZiIKCgpjb250ZXh0czoKLSBuYW1lOiAibG9jYWwiCiAgY29udGV4dDoKICAgIHVzZXI6ICJsb2NhbCIKICAgIGNsdXN0ZXI6ICJsb2NhbCIKCmN1cnJlbnQtY29udGV4dDogImxvY2FsIgo=" + + manifestsTemplate: |- + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cert-manager + spec: + chart: cert-manager + createNamespace: true + version: v1.13.0 + repo: https://charts.jetstack.io + targetNamespace: cert-manager + valuesContent: | + installCRDs: true + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: bootstrap-cluster + spec: + chart: cluster-api-operator + repo: https://kubernetes-sigs.github.io/cluster-api-operator + version: v0.14.0 + valuesContent: | + cert-manager: + enabled: true + bootstrap: rke2 + controlPlane: rke2 + --- + apiVersion: v1 + kind: Namespace + 
metadata: + name: caphv-system + --- + apiVersion: operator.cluster.x-k8s.io/v1alpha2 + kind: InfrastructureProvider + metadata: + name: harvester + namespace: caphv-system + spec: + version: v0.1.4 + fetchConfig: + url: https://github.com/rancher-sandbox/cluster-api-provider-harvester/releases/download/v0.1.4/components.yaml + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + labels: + ccm: external + cluster.x-k8s.io/cluster-name: rke2-mgmt + cni: external + csi: external + name: rke2-mgmt + namespace: default + spec: + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: RKE2ControlPlane + name: rke2-mgmt-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterCluster + name: rke2-mgmt-hv + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterCluster + metadata: + name: rke2-mgmt-hv + namespace: default + spec: + identitySecret: + name: hv-identity-secret + namespace: default + loadBalancerConfig: + ipamType: dhcp + listeners: + - backendPort: 9345 + name: rke2-server + port: 9345 + protocol: TCP + - backendPort: 443 + name: rke2-ingress + port: 443 + protocol: TCP + #server: {{ .Values.experimental.deploy.vcluster.harvester_vip }} + server: 172.27.27.40 + targetNamespace: default + --- + apiVersion: v1 + data: + #kubeconfig: {{ .Values.experimental.deploy.vcluster.harvester_kubeconfig_b64 }} + kubeconfig: "YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIG5hbWU6ICJsb2NhbCIKICBjbHVzdGVyOgogICAgc2VydmVyOiAiaHR0cHM6Ly8xNzIuMjcuMjcuMTkwL2s4cy9jbHVzdGVycy9sb2NhbCIKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiAiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSjJWRU5EUVwKICAgICAgVmRQWjBGM1NVSkJaMGxDUVVSQlMwSm5aM0ZvYTJwUFVGRlJSRUZxUWtkTlVuZDNSMmRaUkZaUlVVdEZlRTVyWlZjMWFHSlhiR29LWVwKICAgICAga2RzZW1SSFZuVmFXRWwwWWpOS2JrMVRXWGRLUVZsRVZsRlJSRVJDTVd0bFZ6Vm9ZbGRzYW1KSGJIcGtSMVoxV2xoSmRGa3lSa0ZOVlwKICAgICAgR014VDFSVmR3cFBSR2MxVFZSQlpVWjNNSGxPVkVWM1RVUk5lRTVxU1RSTlZFWmhSbmN3ZWs1VVJYZE5SRVY0VG1wSk5FMVVSbUZOUlwKICAgICAgVmw0U0VSQllVSm5UbFpDUVc5VUNrVXlValZpYlVaMFlWZE9jMkZZVGpCYVZ6VnNZMmt4ZG1OdFkzaEtha0ZyUW1kT1ZrSkJUVTFJVlwKICAgICAgMUkxWW0xR2RHRlhUbk5oV0U0d1dsYzFiR05wTVdvS1dWVkJlRTU2VlRWT1ZFRTBUMFJyZUUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVwKICAgICAgVmxKUzI5YVNYcHFNRVJCVVdORVVXZEJSVUZWVlU0eFdtUmxURlY2UmdwTWFtSk1Wbk5TT1ZNMGJTdFRTWE5XWlVOa1JVcHVNVGhRYVwKICAgICAgWHBUYm1jMk5rNXhMMWhHVkZaT2RGRnFMMEl3T1hCR01GTXdUVFpMZDJSbmFHUldWM1Y1Q25vMWJFTmlSVzlVVkRaT1EwMUZRWGRFWlwKICAgICAgMWxFVmxJd1VFRlJTQzlDUVZGRVFXZExhMDFCT0VkQk1WVmtSWGRGUWk5M1VVWk5RVTFDUVdZNGQwaFJXVVFLVmxJd1QwSkNXVVZHU1wKICAgICAgRXd2Um5Ga05GRXJaamhpTlhkTFJtSjJUSEpwVTJrMWRtVnpUVUZ2UjBORGNVZFRUVFE1UWtGTlEwRXdaMEZOUlZWRFNVUk5XZ3BVUlwKICAgICAgWFl6VmpjM04zRjZja2RCTDBjNVdVUmxjMlUwVkdaNllWRlhiVmh3UWxWTE9FRm5XWFZJUVdsRlFXeHZaVEpNTVM5RU9VZE1VRGRXU1wKICAgICAgMU13TWxObUNsUnRRbHBxT1d4WVNVeFBSWEJJZDBkR05tSk1WR3hqUFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0IgoKdXNlcnM6Ci0gbmFtZTogImxvY2FsIgogIHVzZXI6CiAgICB0b2tlbjogImt1YmVjb25maWctdXNlci1remo5OWJubmdmOmd4Nm1kdDVmMjlzZjRsY3R2Zm44Mnp4c3NsOXhydzJtNjg1NDhnOWpsN3psbHR2Nm00dHB6ZiIKCgpjb250ZXh0czoKLSBuYW1lOiAibG9jYWwiCiAgY29udGV4dDoKICAgIHVzZXI6ICJsb2NhbCIKICAgIGNsdXN0ZXI6ICJsb2NhbCIKCmN1cnJlbnQtY29udGV4dDogImxvY2FsIgo=" + kind: Secret + metadata: + name: hv-identity-secret + namespace: default + --- + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: RKE2ControlPlane + metadata: + name: rke2-mgmt-control-plane + namespace: default + spec: + agentConfig: + version: v1.33.5+rke2r1 + 
infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + name: rke2-mgmt-cp-machine + namespace: default + replicas: 3 + serverConfig: + cni: canal + --- + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1 + kind: RKE2ConfigTemplate + metadata: + name: rke2-mgmt-worker + namespace: default + spec: + template: + spec: + agentConfig: + version: v1.33.5+rke2r1 + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachineDeployment + metadata: + name: rke2-mgmt-workers + namespace: default + spec: + clusterName: rke2-mgmt + replicas: 0 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1 + kind: RKE2ConfigTemplate + name: rke2-mgmt-worker + namespace: default + clusterName: rke2-mgmt + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + name: rke2-mgmt-wk-machine + namespace: default + version: v1.29.6+rke2r1 + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + metadata: + name: rke2-mgmt-wk-machine + namespace: default + spec: + template: + spec: + cpu: 2 + memory: 16Gi + networks: + #- {{ .Values.experimental.deploy.vcluster.vm_network_name }} + - k8s-network + #sshKeyPair: default/{{ .Values.experimental.deploy.vcluster.ssh_keypair }} + sshKeyPair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil" + #sshUser: {{ .Values.experimental.deploy.vcluster.vm_default_user }} + sshUser: ubuntu + volumes: + - bootOrder: 0 + imageName: default/{{ .Values.experimental.deploy.vcluster.vm_image_name }} + volumeSize: 40Gi + volumeType: image + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + metadata: + name: rke2-mgmt-cp-machine + namespace: default + spec: + template: + spec: + cpu: 2 + memory: 16Gi + networks: + #- {{ .Values.experimental.deploy.vcluster.vm_network_name }} + - k8s-network + #sshKeyPair: default/{{ .Values.experimental.deploy.vcluster.ssh_keypair }} + sshKeyPair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil" + #sshUser: {{ .Values.experimental.deploy.vcluster.vm_default_user }} + sshUser: ubuntu + volumes: + - bootOrder: 0 + imageName: default/{{ .Values.experimental.deploy.vcluster.vm_image_name }} + volumeSize: 40Gi + volumeType: image + --- + apiVersion: addons.cluster.x-k8s.io/v1beta1 + kind: ClusterResourceSet + metadata: + labels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + name: rke2-mgmt-rancher-crs-0 + namespace: default + spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + resources: + - kind: Secret + name: rancher-namespace + - kind: Secret + name: rancher-helmchart + - kind: Secret + name: certmanager-helmchart + strategy: Reconcile + --- + apiVersion: v1 + kind: Secret + metadata: + name: certmanager-helmchart + namespace: default + stringData: + data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: cert-manager\n + \ namespace: default \nspec:\n bootstrap: true\n targetNamespace: cert-manager\n + \ createNamespace: true\n valuesContent: |-\n securityContext:\n runAsNonRoot: + true\n crds:\n enabled: true\n version: v1.16.1\n repo: https://charts.jetstack.io\n + \ chart: cert-manager\n" + type: addons.cluster.x-k8s.io/resource-set + --- + apiVersion: v1 + kind: Secret + metadata: + name: rancher-helmchart + namespace: default + stringData: + data: 
"apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: rancher\n + \ namespace: default \nspec:\n bootstrap: false\n targetNamespace: cattle-system\n + \ createNamespace: true\n set:\n #hostname: {{ .Values.experimental.deploy.vcluster.rancher_url }}\n + \ hostname: rancher-mgmt.product.lan\n + \ replicas: 3\n bootstrapPassword: admin\n valuesContent: |-\n global:\n + \ cattle:\n psp:\n enabled: false\n ingress:\n tls:\n + \ source: rancher\n repo: https://releases.rancher.com/server-charts/latest\n + \ chart: rancher\n version: v2.12.3\n" + type: addons.cluster.x-k8s.io/resource-set diff --git a/deploy/rancher/capi/vcluster-v019.yaml b/deploy/rancher/capi/vcluster-v019.yaml new file mode 100644 index 0000000..c99adff --- /dev/null +++ b/deploy/rancher/capi/vcluster-v019.yaml @@ -0,0 +1,255 @@ +#vm_network_name: "k8s-network" +#ssh_keypair: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil" +#vm_image_name: "ubuntu-22.04" +#vm_default_user: "ubuntu" +#harvester_vip: "172.27.27.40" +#rancher_url: "rancher-mgmt.product.lan" +#harvester_kubeconfig_b64: "YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIG5hbWU6ICJsb2NhbCIKICBjbHVzdGVyOgogICAgc2VydmVyOiAiaHR0cHM6Ly8xNzIuMjcuMjcuMTkwL2s4cy9jbHVzdGVycy9sb2NhbCIKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiAiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSjJWRU5EUVwKICAgICAgVmRQWjBGM1NVSkJaMGxDUVVSQlMwSm5aM0ZvYTJwUFVGRlJSRUZxUWtkTlVuZDNSMmRaUkZaUlVVdEZlRTVyWlZjMWFHSlhiR29LWVwKICAgICAga2RzZW1SSFZuVmFXRWwwWWpOS2JrMVRXWGRLUVZsRVZsRlJSRVJDTVd0bFZ6Vm9ZbGRzYW1KSGJIcGtSMVoxV2xoSmRGa3lSa0ZOVlwKICAgICAgR014VDFSVmR3cFBSR2MxVFZSQlpVWjNNSGxPVkVWM1RVUk5lRTVxU1RSTlZFWmhSbmN3ZWs1VVJYZE5SRVY0VG1wSk5FMVVSbUZOUlwKICAgICAgVmw0U0VSQllVSm5UbFpDUVc5VUNrVXlValZpYlVaMFlWZE9jMkZZVGpCYVZ6VnNZMmt4ZG1OdFkzaEtha0ZyUW1kT1ZrSkJUVTFJVlwKICAgICAgMUkxWW0xR2RHRlhUbk5oV0U0d1dsYzFiR05wTVdvS1dWVkJlRTU2VlRWT1ZFRTBUMFJyZUUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVwKICAgICAgVmxKUzI5YVNYcHFNRVJCVVdORVVXZEJSVUZWVlU0eFdtUmxURlY2UmdwTWFtSk1Wbk5TT1ZNMGJTdFRTWE5XWlVOa1JVcHVNVGhRYVwKICAgICAgWHBUYm1jMk5rNXhMMWhHVkZaT2RGRnFMMEl3T1hCR01GTXdUVFpMZDJSbmFHUldWM1Y1Q25vMWJFTmlSVzlVVkRaT1EwMUZRWGRFWlwKICAgICAgMWxFVmxJd1VFRlJTQzlDUVZGRVFXZExhMDFCT0VkQk1WVmtSWGRGUWk5M1VVWk5RVTFDUVdZNGQwaFJXVVFLVmxJd1QwSkNXVVZHU1wKICAgICAgRXd2Um5Ga05GRXJaamhpTlhkTFJtSjJUSEpwVTJrMWRtVnpUVUZ2UjBORGNVZFRUVFE1UWtGTlEwRXdaMEZOUlZWRFNVUk5XZ3BVUlwKICAgICAgWFl6VmpjM04zRjZja2RCTDBjNVdVUmxjMlUwVkdaNllWRlhiVmh3UWxWTE9FRm5XWFZJUVdsRlFXeHZaVEpNTVM5RU9VZE1VRGRXU1wKICAgICAgMU13TWxObUNsUnRRbHBxT1d4WVNVeFBSWEJJZDBkR05tSk1WR3hqUFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0IgoKdXNlcnM6Ci0gbmFtZTogImxvY2FsIgogIHVzZXI6CiAgICB0b2tlbjogImt1YmVjb25maWctdXNlci1remo5OWJubmdmOmd4Nm1kdDVmMjlzZjRsY3R2Zm44Mnp4c3NsOXhydzJtNjg1NDhnOWpsN3psbHR2Nm00dHB6ZiIKCgpjb250ZXh0czoKLSBuYW1lOiAibG9jYWwiCiAgY29udGV4dDoKICAgIHVzZXI6ICJsb2NhbCIKICAgIGNsdXN0ZXI6ICJsb2NhbCIKCmN1cnJlbnQtY29udGV4dDogImxvY2FsIgo=" + + +vcluster: + image: rancher/k3s:v1.33.5-k3s1 + + +sync: + ingresses: + enabled: true + +init: + manifestsTemplate: |- + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cert-manager + spec: + chart: cert-manager + createNamespace: true + version: v1.13.0 + repo: https://charts.jetstack.io + targetNamespace: cert-manager + valuesContent: | + installCRDs: true + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: bootstrap-cluster + spec: + chart: cluster-api-operator + repo: https://kubernetes-sigs.github.io/cluster-api-operator + version: v0.14.0 + valuesContent: | + 
cert-manager: + enabled: true + bootstrap: rke2 + controlPlane: rke2 + --- + apiVersion: v1 + kind: Namespace + metadata: + name: caphv-system + --- + apiVersion: operator.cluster.x-k8s.io/v1alpha2 + kind: InfrastructureProvider + metadata: + name: harvester + namespace: caphv-system + spec: + version: v0.1.4 + fetchConfig: + url: https://github.com/rancher-sandbox/cluster-api-provider-harvester/releases/download/v0.1.4/components.yaml + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + labels: + ccm: external + cluster.x-k8s.io/cluster-name: rke2-mgmt + cni: external + csi: external + name: rke2-mgmt + namespace: default + spec: + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: RKE2ControlPlane + name: rke2-mgmt-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterCluster + name: rke2-mgmt-hv + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterCluster + metadata: + name: rke2-mgmt-hv + namespace: default + spec: + identitySecret: + name: hv-identity-secret + namespace: default + loadBalancerConfig: + ipamType: dhcp + listeners: + - backendPort: 9345 + name: rke2-server + port: 9345 + protocol: TCP + - backendPort: 443 + name: rke2-ingress + port: 443 + protocol: TCP + server: {{ .Values.harvester_vip }} + targetNamespace: default + --- + apiVersion: v1 + data: + kubeconfig: {{ .Values.harvester_kubeconfig_b64 }} + kind: Secret + metadata: + name: hv-identity-secret + namespace: default + --- + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: RKE2ControlPlane + metadata: + name: rke2-mgmt-control-plane + namespace: default + spec: + agentConfig: + version: v1.33.5+rke2r1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + name: rke2-mgmt-cp-machine + namespace: default + replicas: 3 + serverConfig: + cni: canal + --- + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1 + kind: RKE2ConfigTemplate + metadata: + name: rke2-mgmt-worker + namespace: default + spec: + template: + spec: + agentConfig: + version: v1.33.5+rke2r1 + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachineDeployment + metadata: + name: rke2-mgmt-workers + namespace: default + spec: + clusterName: rke2-mgmt + replicas: 0 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1 + kind: RKE2ConfigTemplate + name: rke2-mgmt-worker + namespace: default + clusterName: rke2-mgmt + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + name: rke2-mgmt-wk-machine + namespace: default + version: v1.29.6+rke2r1 + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + metadata: + name: rke2-mgmt-wk-machine + namespace: default + spec: + template: + spec: + cpu: 2 + memory: 16Gi + networks: + - {{ .Values.vm_network_name }} + sshKeyPair: default/{{ .Values.ssh_keypair }} + sshUser: {{ .Values.vm_default_user }} + volumes: + - bootOrder: 0 + imageName: default/{{ .Values.vm_image_name }} + volumeSize: 40Gi + volumeType: image + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + metadata: + name: rke2-mgmt-cp-machine + namespace: default + spec: + template: + spec: + cpu: 2 + memory: 16Gi + networks: + - {{ .Values.vm_network_name }} + sshKeyPair: default/{{ .Values.ssh_keypair }} + sshUser: {{ 
.Values.vm_default_user }} + volumes: + - bootOrder: 0 + imageName: default/{{ .Values.vm_image_name }} + volumeSize: 40Gi + volumeType: image + --- + apiVersion: addons.cluster.x-k8s.io/v1beta1 + kind: ClusterResourceSet + metadata: + labels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + name: rke2-mgmt-rancher-crs-0 + namespace: default + spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + resources: + - kind: Secret + name: rancher-namespace + - kind: Secret + name: rancher-helmchart + - kind: Secret + name: certmanager-helmchart + strategy: Reconcile + --- + apiVersion: v1 + kind: Secret + metadata: + name: certmanager-helmchart + namespace: default + stringData: + data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: cert-manager\n + \ namespace: default \nspec:\n bootstrap: true\n targetNamespace: cert-manager\n + \ createNamespace: true\n valuesContent: |-\n securityContext:\n runAsNonRoot: + true\n crds:\n enabled: true\n version: v1.16.1\n repo: https://charts.jetstack.io\n + \ chart: cert-manager\n" + type: addons.cluster.x-k8s.io/resource-set + --- + apiVersion: v1 + kind: Secret + metadata: + name: rancher-helmchart + namespace: default + stringData: + data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: rancher\n + \ namespace: default \nspec:\n bootstrap: false\n targetNamespace: cattle-system\n + \ createNamespace: true\n set:\n hostname: {{ .Values.rancher_url }}\n + \ replicas: 3\n bootstrapPassword: admin\n valuesContent: |-\n global:\n + \ cattle:\n psp:\n enabled: false\n ingress:\n tls:\n + \ source: rancher\n repo: https://releases.rancher.com/server-charts/latest\n + \ chart: rancher\n version: v2.12.3\n" + type: addons.cluster.x-k8s.io/resource-set diff --git a/deploy/rancher/capi/vcluster.yaml b/deploy/rancher/capi/vcluster.yaml new file mode 100644 index 0000000..93755df --- /dev/null +++ b/deploy/rancher/capi/vcluster.yaml @@ -0,0 +1,256 @@ +controlPlane: + distro: + k3s: + enabled: true + image: + tag: v1.33.5-k3s1 + statefulSet: + scheduling: + podManagementPolicy: OrderedReady +experimental: + deploy: + vcluster: + manifestsTemplate: |- + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cert-manager + spec: + chart: cert-manager + createNamespace: true + version: v1.13.0 + repo: https://charts.jetstack.io + targetNamespace: cert-manager + valuesContent: | + installCRDs: true + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: bootstrap-cluster + spec: + chart: cluster-api-operator + repo: https://kubernetes-sigs.github.io/cluster-api-operator + version: v0.14.0 + valuesContent: | + cert-manager: + enabled: true + bootstrap: rke2 + controlPlane: rke2 + --- + apiVersion: v1 + kind: Namespace + metadata: + name: caphv-system + --- + apiVersion: operator.cluster.x-k8s.io/v1alpha2 + kind: InfrastructureProvider + metadata: + name: harvester + namespace: caphv-system + spec: + version: v0.1.4 + fetchConfig: + url: https://github.com/rancher-sandbox/cluster-api-provider-harvester/releases/download/v0.1.4/components.yaml + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + labels: + ccm: external + cluster.x-k8s.io/cluster-name: rke2-mgmt + cni: external + csi: external + name: rke2-mgmt + namespace: default + spec: + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: RKE2ControlPlane + name: rke2-mgmt-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: 
HarvesterCluster + name: rke2-mgmt-hv + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterCluster + metadata: + name: rke2-mgmt-hv + namespace: default + spec: + identitySecret: + name: hv-identity-secret + namespace: default + loadBalancerConfig: + ipamType: dhcp + listeners: + - backendPort: 9345 + name: rke2-server + port: 9345 + protocol: TCP + - backendPort: 443 + name: rke2-ingress + port: 443 + protocol: TCP + server: {{ .Values.harvester_vip }} + targetNamespace: default + --- + apiVersion: v1 + data: + kubeconfig: {{ .Values.harvester_kubeconfig_b64 }} + kind: Secret + metadata: + name: hv-identity-secret + namespace: default + --- + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: RKE2ControlPlane + metadata: + name: rke2-mgmt-control-plane + namespace: default + spec: + agentConfig: + version: v1.33.5+rke2r1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + name: rke2-mgmt-cp-machine + namespace: default + replicas: 3 + serverConfig: + cni: canal + --- + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1 + kind: RKE2ConfigTemplate + metadata: + name: rke2-mgmt-worker + namespace: default + spec: + template: + spec: + agentConfig: + version: v1.33.5+rke2r1 + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachineDeployment + metadata: + name: rke2-mgmt-workers + namespace: default + spec: + clusterName: rke2-mgmt + replicas: 0 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha1 + kind: RKE2ConfigTemplate + name: rke2-mgmt-worker + namespace: default + clusterName: rke2-mgmt + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + name: rke2-mgmt-wk-machine + namespace: default + version: v1.29.6+rke2r1 + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + metadata: + name: rke2-mgmt-wk-machine + namespace: default + spec: + template: + spec: + cpu: 2 + memory: 16Gi + networks: + - {{ .Values.vm_network_name }} + sshKeyPair: default/{{ .Values.ssh_keypair }} + sshUser: {{ .Values.vm_default_user }} + volumes: + - bootOrder: 0 + imageName: default/{{ .Values.vm_image_name }} + volumeSize: 40Gi + volumeType: image + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: HarvesterMachineTemplate + metadata: + name: rke2-mgmt-cp-machine + namespace: default + spec: + template: + spec: + cpu: 2 + memory: 16Gi + networks: + - {{ .Values.vm_network_name }} + sshKeyPair: default/{{ .Values.ssh_keypair }} + sshUser: {{ .Values.vm_default_user }} + volumes: + - bootOrder: 0 + imageName: default/{{ .Values.vm_image_name }} + volumeSize: 40Gi + volumeType: image + --- + apiVersion: addons.cluster.x-k8s.io/v1beta1 + kind: ClusterResourceSet + metadata: + labels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + name: rke2-mgmt-rancher-crs-0 + namespace: default + spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: rke2-mgmt + resources: + - kind: Secret + name: rancher-namespace + - kind: Secret + name: rancher-helmchart + - kind: Secret + name: certmanager-helmchart + strategy: Reconcile + --- + apiVersion: v1 + kind: Secret + metadata: + name: certmanager-helmchart + namespace: default + stringData: + data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: cert-manager\n + \ namespace: default \nspec:\n bootstrap: true\n targetNamespace: 
cert-manager\n + \ createNamespace: true\n valuesContent: |-\n securityContext:\n runAsNonRoot: + true\n crds:\n enabled: true\n version: v1.16.1\n repo: https://charts.jetstack.io\n + \ chart: cert-manager\n" + type: addons.cluster.x-k8s.io/resource-set + --- + apiVersion: v1 + kind: Secret + metadata: + name: rancher-helmchart + namespace: default + stringData: + data: "apiVersion: helm.cattle.io/v1\nkind: HelmChart\nmetadata:\n name: rancher\n + \ namespace: default \nspec:\n bootstrap: false\n targetNamespace: cattle-system\n + \ createNamespace: true\n set:\n hostname: {{ .Values.rancher_url }}\n + \ replicas: 3\n bootstrapPassword: admin\n valuesContent: |-\n global:\n + \ cattle:\n psp:\n enabled: false\n ingress:\n tls:\n + \ source: rancher\n repo: https://releases.rancher.com/server-charts/latest\n + \ chart: rancher\n version: v2.12.3\n" + type: addons.cluster.x-k8s.io/resource-set +sync: + fromHost: + ingressClasses: + enabled: true + toHost: + ingresses: + enabled: true diff --git a/deploy/rancher/fleet/mgmt-dhcp.yaml b/deploy/rancher/fleet/mgmt-dhcp.yaml new file mode 100644 index 0000000..49da4a8 --- /dev/null +++ b/deploy/rancher/fleet/mgmt-dhcp.yaml @@ -0,0 +1,475 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: mgmt-cluster + namespace: fleet-local +spec: + helm: + chart: ./rke2 + # releaseName: rke2-mgmt + values: + cluster_name: rke2-mgmt + control_plane: + cpu_count: 8 + files: + - content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cert-manager + namespace: default + spec: + bootstrap: true + targetNamespace: cert-manager + createNamespace: true + valuesContent: |- + securityContext: + runAsNonRoot: true + crds: + enabled: true + version: v1.16.1 + repo: https://charts.jetstack.io + chart: cert-manager + owner: root + path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml + - content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: rancher + namespace: default + spec: + bootstrap: false + targetNamespace: cattle-system + createNamespace: true + set: + hostname: rancher-mgmt.product.lan + replicas: 3 + bootstrapPassword: admin + valuesContent: |- + global: + cattle: + psp: + enabled: false + ingress: + tls: + source: rancher + repo: https://releases.rancher.com/server-charts/stable + chart: rancher + version: v2.12.3 + owner: root + path: /var/lib/rancher/rke2/server/manifests/rancher.yaml + ipam: dhcp + loadbalancer_gateway: 172.27.27.1 + loadbalancer_subnet: 172.27.27.0/24 + memory_gb: 16 + network: + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: yes + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: yes + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: yes + node_count: 3 + vip: 172.27.27.40 + network_name: k8s-network + registry_config: + configs: + rgcrprod.azurecr.us: + auth: + password: test + username: test + rke2_version: v1.33.4+rke2r1 + ssh_pub_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil + system_default_registry: "" + vm: + airgapped_image: false + image: ubuntu-22.04 + qemu_agent_enable: true + qemu_agent_install: true + worker: + node_count: 0 + storage: + class: longhorn-image-99dd5 # StorageClass for image ubuntu-22.04 + valuesFiles: + - values.yaml + + resources: + - content: |2- + helm: + chart: ./rke2 + releaseName: rke2-mgmt + valuesFiles: + - values.yaml + name: fleet.yaml + - content: |- + apiVersion: v2 + name: rke2-cluster 
+ description: RKE2 cluster designed for usage directly on Harvester + + type: application + version: 0.1.1 + appVersion: 0.1.1 + name: rke2/Chart.yaml + - content: "{{- range $i := until (.Values.control_plane.node_count | int) }}\n---\napiVersion: + v1\nkind: Secret\nmetadata:\n name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit\n + \ namespace: {{ $.Values.cluster_namespace }}\nstringData:\n userdata: |\n + \ #cloud-config\n {{- if $.Values.vm.qemu_agent_install }}\n package_update: + true\n packages:\n - qemu-guest-agent\n {{- end }}\n write_files: + \n {{- if $.Values.control_plane.files }}\n{{ $.Values.control_plane.files + | toYaml | indent 4 }}\n {{- end }}\n - path: /etc/rancher/rke2/config.yaml\n + \ owner: root\n content: |\n token: {{ $.Values.shared_token + }}\n {{- if ne $i 0 }}\n server: https://{{ $.Values.control_plane.vip + }}:9345\n {{- end }}\n system-default-registry: {{ $.Values.system_default_registry + }}\n tls-san:\n - {{ $.Values.cluster_name }}-cp-{{ $i }}\n + \ - {{ $.Values.control_plane.vip }}\n secrets-encryption: true\n + \ write-kubeconfig-mode: 0640\n use-service-account-credentials: + true\n {{- if hasKey $.Values \"registry_config\" }}\n - path: /etc/rancher/rke2/registries.yaml\n + \ owner: root\n content: |-\n{{ $.Values.registry_config | toYaml | + indent 8 }}\n {{- end }}\n - path: /etc/hosts\n owner: root\n content: + |\n 127.0.0.1 localhost\n 127.0.0.1 {{$.Values.cluster_name }}-cp-{{ + $i }}\n runcmd:\n {{- if $.Values.vm.qemu_agent_enable }}\n - - systemctl\n + \ - enable\n - '--now'\n - qemu-guest-agent.service\n {{- end + }}\n {{- if not $.Values.vm.airgapped_image }}\n - mkdir -p /var/lib/rancher/rke2-artifacts + && wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh\n + \ {{- end}}\n - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} /var/lib/rancher/install.sh\n + \ - systemctl enable rke2-server.service\n - useradd -r -c \"etcd user\" + -s /sbin/nologin -M etcd -U\n - systemctl start rke2-server.service\n ssh_authorized_keys: + \n - {{ $.Values.ssh_pub_key }}\n {{- if ne $.Values.control_plane.ipam + \"dhcp\" }}\n {{- if hasKey $.Values.control_plane \"network\" }}\n networkdata: + |\n{{ index $.Values.control_plane.network $i | indent 4 }}\n {{- end}}\n {{- + else}}\n networkdata: \"\"\n {{- end}}\n{{- end}}" + name: rke2/templates/rke2_cp_secret.yaml + - content: |- + {{- range $i := until (.Values.control_plane.node_count | int) }} + --- + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: {{ $.Values.cluster_name }}-cp-disk-{{ $i }} + namespace: {{ $.Values.cluster_namespace }} + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ $.Values.control_plane.node_disk_gb }}Gi + storageClassName: {{ $.Values.storage.class }} + volumeMode: Block + --- + apiVersion: kubevirt.io/v1 + kind: VirtualMachine + metadata: + namespace: {{ $.Values.cluster_namespace }} + annotations: + # harvesterhci.io/volumeClaimTemplates: | + # [{"metadata":{"name":"{{ $.Values.cluster_name }}-cp-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"{{ $.Values.control_plane.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}] + # network.harvesterhci.io/ips: '[]' + labels: + harvesterhci.io/creator: harvester + 
harvesterhci.io/os: {{ $.Values.vm.os }} + name: {{ $.Values.cluster_name }}-cp-{{ $i }} + finalizers: + - harvesterhci.io/VMController.UnsetOwnerOfPVCs + spec: + runStrategy: RerunOnFailure + template: + metadata: + annotations: {} + labels: + harvesterhci.io/vmName: {{ $.Values.cluster_name }}-cp-{{ $i }} + spec: + domain: + machine: + type: '' + cpu: + cores: {{ $.Values.control_plane.cpu_count }} + sockets: 1 + threads: 1 + devices: + interfaces: + - bridge: {} + model: virtio + name: default + disks: + - name: disk-0 + disk: + bus: virtio + bootOrder: 1 + - name: cloudinitdisk + disk: + bus: virtio + hostDevices: [] + resources: + limits: + memory: {{ $.Values.control_plane.memory_gb }}Gi + cpu: {{ $.Values.control_plane.cpu_count }} + features: + acpi: + enabled: {{ $.Values.vm.uefi_enabled }} + firmware: + bootloader: + efi: + secureBoot: false + evictionStrategy: LiveMigrate + hostname: {{ $.Values.cluster_name }}-cp-{{ $i }} + networks: + - name: default + multus: + networkName: default/{{ $.Values.network_name }} + volumes: + - name: disk-0 + persistentVolumeClaim: + claimName: {{ $.Values.cluster_name }}-cp-disk-{{ $i }} + - name: cloudinitdisk + cloudInitNoCloud: + secretRef: + name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit + networkDataSecretRef: + name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit + affinity: {} + terminationGracePeriodSeconds: 120 + {{- end }} + name: rke2/templates/rke2_cp_vm.yaml + - content: |- + --- + apiVersion: loadbalancer.harvesterhci.io/v1beta1 + kind: IPPool + metadata: + name: {{ $.Values.cluster_name }}-pool + spec: + ranges: + - gateway: {{ .Values.control_plane.loadbalancer_gateway }} + rangeEnd: {{ .Values.control_plane.vip }} + rangeStart: {{ .Values.control_plane.vip }} + subnet: {{ .Values.control_plane.loadbalancer_subnet }} + selector: {} + --- + apiVersion: loadbalancer.harvesterhci.io/v1beta1 + kind: LoadBalancer + metadata: + name: {{ .Values.cluster_name }}-lb + namespace: default + spec: + healthCheck: + failureThreshold: 2 + port: 6443 + successThreshold: 3 + timeoutSeconds: 5 + periodSeconds: 5 + ipam: pool + ipPool: {{ .Values.cluster_name }}-pool + listeners: + - name: k8s-api + port: 6443 + protocol: TCP + backendPort: 6443 + - name: ingress + port: 443 + protocol: TCP + backendPort: 443 + - name: join + port: 9345 + protocol: TCP + backendPort: 9345 + workloadType: vm + backendServerSelector: + harvesterhci.io/vmName: + {{- range $i := until (.Values.control_plane.node_count | int)}} + - {{ $.Values.cluster_name }}-cp-{{ $i }} + {{- end}} + name: rke2/templates/rke2_lb.yaml + - content: "{{- range $i := until (.Values.worker.node_count | int) }}\n---\napiVersion: + v1\nkind: Secret\nmetadata:\n name: {{ $.Values.cluster_name }}-worker-{{ $i + }}-cloudinit\n namespace: {{ $.Values.cluster_namespace }}\nstringData:\n userdata: + |\n #cloud-config\n {{- if $.Values.vm.qemu_agent_install }}\n package_update: + true\n packages:\n - qemu-guest-agent\n {{- end }}\n write_files: + \n {{- if $.Values.worker.files }}\n{{ $.Values.worker.files | toYaml | indent + 4 }}\n {{- end }}\n - path: /etc/rancher/rke2/config.yaml\n owner: + root\n content: |\n token: {{ $.Values.shared_token }}\n {{- + if ne $i 0 }}\n server: https://{{ $.Values.control_plane.vip }}:9345\n + \ {{- end }}\n system-default-registry: {{ $.Values.system_default_registry + }}\n secrets-encryption: true\n write-kubeconfig-mode: 0640\n + \ use-service-account-credentials: true\n {{- if hasKey $.Values \"registry_config\" + }}\n - path: 
/etc/rancher/rke2/registries.yaml\n      owner: root\n      content:
+    |-\n{{ $.Values.registry_config | toYaml | indent 8 }}\n    {{- end }}\n    -
+    path: /etc/hosts\n      owner: root\n      content: |\n        127.0.0.1 localhost\n
+    \        127.0.0.1 {{$.Values.cluster_name }}-worker-{{ $i }}\n    runcmd:\n
+    \    {{- if $.Values.vm.qemu_agent_enable }}\n    - - systemctl\n      - enable\n
+    \      - '--now'\n      - qemu-guest-agent.service\n    {{- end }}\n    {{- if
+    not $.Values.vm.airgapped_image }}\n    - mkdir -p /var/lib/rancher/rke2-artifacts
+    && wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh\n
+    \    {{- end}}\n    - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} INSTALL_RKE2_TYPE=\"agent\"
+    /var/lib/rancher/install.sh\n    - systemctl enable rke2-agent.service\n    -
+    systemctl start rke2-agent.service\n    ssh_authorized_keys: \n    - {{ $.Values.ssh_pub_key
+    }}\n  {{- if ne $.Values.worker.ipam \"dhcp\" }}\n  {{- if hasKey $.Values.worker
+    \"network\" }}\n  networkdata: |\n{{ index $.Values.worker.network $i | indent
+    4 }}\n  {{- end}}\n  {{- else}}\n  networkdata: \"\"\n  {{- end}}\n{{- end}}"
+    name: rke2/templates/rke2_worker_secret.yaml
+  - content: |-
+      {{- range $i := until (.Values.worker.node_count | int) }}
+      ---
+      apiVersion: kubevirt.io/v1
+      kind: VirtualMachine
+      metadata:
+        namespace: {{ $.Values.cluster_namespace }}
+        annotations:
+          harvesterhci.io/volumeClaimTemplates: |
+            [{"metadata":{"name":"{{ $.Values.cluster_name }}-worker-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"{{ $.Values.worker.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
+          network.harvesterhci.io/ips: '[]'
+        labels:
+          harvesterhci.io/creator: harvester
+          harvesterhci.io/os: {{ $.Values.vm.os }}
+        name: {{ $.Values.cluster_name }}-worker-{{ $i }}
+        finalizers:
+          - harvesterhci.io/VMController.UnsetOwnerOfPVCs
+      spec:
+        runStrategy: RerunOnFailure
+        template:
+          metadata:
+            annotations: {}
+            labels:
+              harvesterhci.io/vmName: {{ $.Values.cluster_name }}-worker-{{ $i }}
+          spec:
+            domain:
+              machine:
+                type: ''
+              cpu:
+                cores: {{ $.Values.worker.cpu_count }}
+                sockets: 1
+                threads: 1
+              devices:
+                interfaces:
+                  - bridge: {}
+                    model: virtio
+                    name: default
+                disks:
+                  - name: disk-0
+                    disk:
+                      bus: virtio
+                    bootOrder: 1
+                  - name: cloudinitdisk
+                    disk:
+                      bus: virtio
+                hostDevices: []
+              resources:
+                limits:
+                  memory: {{ $.Values.worker.memory_gb }}Gi
+                  cpu: {{ $.Values.worker.cpu_count }}
+              features:
+                acpi:
+                  enabled: {{ $.Values.vm.uefi_enabled }}
+              firmware:
+                bootloader:
+                  efi:
+                    secureBoot: false
+            evictionStrategy: LiveMigrate
+            hostname: {{ $.Values.cluster_name }}-worker-{{ $i }}
+            networks:
+              - name: default
+                multus:
+                  networkName: default/{{ $.Values.network_name }}
+            volumes:
+              - name: disk-0
+                persistentVolumeClaim:
+                  claimName: {{ $.Values.cluster_name }}-worker-disk-{{ $i }}
+              - name: cloudinitdisk
+                cloudInitNoCloud:
+                  secretRef:
+                    name: {{ $.Values.cluster_name }}-worker-{{ $i }}-cloudinit
+                  networkData: ""
+            affinity: {}
+            terminationGracePeriodSeconds: 120
+      {{- end }}
+    name: rke2/templates/rke2_worker_vm.yaml
+  - content: "cluster_name: mycluster\ncluster_namespace: default\n\nshared_token:
+    insecuretoken\nsystem_default_registry: \"\"          #! empty value: use embedded
+    default\n                                     #! non-empty value: use as registry
+    to source rke2 runtime image from\n                                     #!
if your + VM image contains the tarballs for RKE2, it will use those first\nrke2_version: + v1.26.10+rke2r2\n\nssh_pub_key: \"\" #! the public ssh key + to inject onto each node, required if you want to fetch a kubeconfig\n\n# registry_config:\n# + \ configs:\n# \"rgcrprod.azurecr.us\":\n# auth:\n# username: + test\n# password: test\n\nvm:\n image_namespace: default #! + namespace in your harvester cluster containing the vm base image\n image: ubuntu + \ #! name of base vm image to use for your RKE2 nodes\n os: + linux\n distro: ubuntu #! flag used for specific cloud-init + code tied to Ubuntu vs others (netplan)\n uefi_enabled: true\n qemu_agent_install: + true #! flag for installation of the qemu-agent service (Requires internet)\n + \ qemu_agent_enable: true #! flag for enabling the qemu-agent\n airgapped_image: + false #! flag to alert helm that your VM image already has the RKE2 + install script (and does not need to download it)\n\n\nnetwork_name: host\n\ncontrol_plane:\n + \ node_count: 1\n cpu_count: 4\n memory_gb: 8\n node_disk_gb: 40\n loadbalancer_gateway: + 10.10.0.1\n loadbalancer_subnet: 10.10.0.0/24\n files: []\n # files:\n # + - path: /tmp/test\n # owner: root\n # content: |\n # created a file\n\n + \ vip: #! this is the VIP for the Harvester LoadBalancer + object, ensure it is a routable IP\n ipam: dhcp #! this + can be dhcp or static, static requires an equal amount of cloud-init network-data + entries\n\n # network:\n # - | #! ubuntu example\n # network:\n # version: + 2\n # renderer: networkd\n # ethernets:\n # enp1s0:\n # dhcp4: + no\n # addresses: [ \"10.10.0.6/24\" ]\n # gateway4: 10.10.0.1\n + \ # nameservers:\n # addresses: \n # - 10.10.0.1\n\nworker:\n + \ node_count: 1\n cpu_count: 4\n memory_gb: 8\n node_disk_gb: 40\n files: + []\n # files:\n # - path: /tmp/test\n # owner: root\n # content: |\n + \ # created a file\n\n ipam: dhcp #! 
this can be dhcp + or static, static requires an equal amount of cloud-init network-data entries\n\n + \ # network:\n # - |\n # network:\n # version: 2\n # renderer: + networkd\n # ethernets:\n # enp1s0:\n # dhcp4: no\n # + \ addresses: [ \"10.10.0.20/24\" ]\n # gateway4: 10.10.0.1\n + \ # nameservers:\n # addresses: \n # - 10.10.0.1\n" + name: rke2/values.yaml + - content: "cluster_name: rke2-mgmt\nsystem_default_registry: \"\"\n\nrke2_version: + v1.29.6+rke2r1\n\nvm:\n image: ubuntu\n qemu_agent_install: true \n + \ qemu_agent_enable: true \n airgapped_image: false \nnetwork_name: + lab-workload\nssh_pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDK3vpDMfNKbXTkpOwC77k5xvEpUAcNyJa6pYT17YMbzvHuugSJRiouLElDtpqktto6plkDdrTRXnkDA3aVxUycCl/4IrnCEehlg8LGgFxKASNeXQYL0URIWRDicyJaolg5bFdcu3gYTA0JBtApiebrml6bj9cJGnS8lqRK9wmWIFv5lPICcrZMsw1AIRhghGI5BupUnttD+muHspAiGfjTbiiCvKo3fLmEMQ9pt/46wQuPbzOCVChpJByVG9AKO9IpdkOGgKeuy2y98ZxJIHBAx4B49jDfA8NNfyEBIdgiIvlv6QXgjFbazI5buLYM/RK36kf9JjYNBySZJuA3VMbHnWmWvZYBQUA6ypVMc4Wzvd3hhFmNQn1W+NEHl6v+bCDeo5QIv5dkpIoDgJd8CvWQ42bb2bi7zyO32v2zfaW03eDCeopFAKditMPhjqai0S2W4LRt7dRKEOCvUqPFYqZ99nBk1mmTWG8Gpp7VA/+shn171Yc/wDCwBcEyciqOYNtnW55O3eCiBHsnBcEFKy80zHJ1jckDSluypwBrsooYV5WKS8O+jqGyYfdruJ8oUCPw72b0JHs5AmFCRuhzOU6cZP6Ynghs1SkdVtq722uFjmDUR0X8+hoIZDEWutw6+91YhwnodA3MmGHtInlY+URqdz6TltOMP2X2vSMohnh2zQ==\n\nregistry_config:\n + \ configs:\n \"rgcrprod.azurecr.us\":\n auth:\n username: test\n + \ password: test\n\ncontrol_plane:\n vip: 10.2.0.20 \n loadbalancer_gateway: + 10.2.0.1\n loadbalancer_subnet: 10.2.0.0/24\n \n node_count: 3 \n cpu_count: + 8\n memory_gb: 16\n\n ipam: static\n network:\n - |\n network:\n version: + 2\n renderer: networkd\n ethernets:\n enp1s0:\n dhcp4: + no\n addresses: [ \"10.2.0.21/24\" ]\n gateway4: 10.2.0.1\n + \ nameservers:\n addresses: \n - 10.2.0.1\n - + |\n network:\n version: 2\n renderer: networkd\n ethernets:\n + \ enp1s0:\n dhcp4: no\n addresses: [ \"10.2.0.22/24\" + ]\n gateway4: 10.2.0.1\n nameservers:\n addresses: + \n - 10.2.0.1\n - |\n network:\n version: 2\n renderer: + networkd\n ethernets:\n enp1s0:\n dhcp4: no\n addresses: + [ \"10.2.0.23/24\" ]\n gateway4: 10.2.0.1\n nameservers:\n + \ addresses: \n - 10.2.0.1\n files:\n - path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml\n + \ owner: root\n content: |\n apiVersion: helm.cattle.io/v1\n kind: + HelmChart\n metadata:\n name: cert-manager\n namespace: default + \ \n spec:\n bootstrap: true\n targetNamespace: cert-manager\n + \ createNamespace: true\n valuesContent: |-\n securityContext:\n + \ runAsNonRoot: true\n crds:\n enabled: true\n + \ version: v1.16.1\n repo: https://charts.jetstack.io\n chart: + cert-manager\n - path: /var/lib/rancher/rke2/server/manifests/rancher.yaml\n + \ owner: root\n content: |\n apiVersion: helm.cattle.io/v1\n kind: + HelmChart\n metadata:\n name: rancher\n namespace: default + \ \n spec:\n bootstrap: false\n targetNamespace: cattle-system\n + \ createNamespace: true\n set:\n hostname: rancher.lab.sienarfleet.systems\n + \ replicas: 3\n bootstrapPassword: admin\n valuesContent: + |-\n global:\n cattle:\n psp:\n enabled: + false\n ingress:\n tls:\n source: rancher\n + \ repo: https://releases.rancher.com/server-charts/stable\n chart: + rancher\n version: v2.10.1\nworker:\n node_count: 0" + name: values.yaml + targetRestrictions: + - clusterName: local + targets: + - clusterName: local + ignore: {} diff --git a/deploy/rancher/fleet/mgmt-static.yaml b/deploy/rancher/fleet/mgmt-static.yaml new file mode 100644 index 0000000..8710084 --- 
/dev/null +++ b/deploy/rancher/fleet/mgmt-static.yaml @@ -0,0 +1,493 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: mgmt-cluster + namespace: fleet-local +spec: + helm: + chart: ./rke2 + # releaseName: rke2-mgmt + values: + cluster_name: rke2-mgmt + control_plane: + cpu_count: 8 + files: + - content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cert-manager + namespace: default + spec: + bootstrap: true + targetNamespace: cert-manager + createNamespace: true + valuesContent: |- + securityContext: + runAsNonRoot: true + crds: + enabled: true + version: v1.16.1 + repo: https://charts.jetstack.io + chart: cert-manager + owner: root + path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml + - content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: rancher + namespace: default + spec: + bootstrap: false + targetNamespace: cattle-system + createNamespace: true + set: + hostname: rancher-mgmt.product.lan + replicas: 3 + bootstrapPassword: admin + valuesContent: |- + global: + cattle: + psp: + enabled: false + ingress: + tls: + source: rancher + repo: https://releases.rancher.com/server-charts/stable + chart: rancher + version: v2.12.3 + owner: root + path: /var/lib/rancher/rke2/server/manifests/rancher.yaml + ipam: static + loadbalancer_gateway: 172.27.27.1 + loadbalancer_subnet: 172.27.27.0/24 + memory_gb: 16 + network: + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: no + addresses: [ "172.22.19.41/24" ] + gateway4: 172.22.19.1 + nameservers: + addresses: + - 172.22.19.15 + - 172.22.19.16 + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: no + addresses: [ "172.22.19.42/24" ] + gateway4: 172.22.19.1 + nameservers: + addresses: + - 172.22.19.15 + - 172.22.19.16 + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: no + addresses: [ "172.22.19.43/24" ] + gateway4: 172.22.19.1 + nameservers: + addresses: + - 172.22.19.15 + - 172.22.19.16 + node_count: 3 + vip: 172.27.27.40 + network_name: k8s-network + registry_config: + configs: + rgcrprod.azurecr.us: + auth: + password: test + username: test + rke2_version: v1.33.4+rke2r1 + ssh_pub_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil + system_default_registry: "" + vm: + airgapped_image: false + image: ubuntu-22.04 + qemu_agent_enable: true + qemu_agent_install: true + worker: + node_count: 0 + storage: + class: longhorn-image-99dd5 # StorageClass for image ubuntu-22.04 + valuesFiles: + - values.yaml + + resources: + - content: |2- + helm: + chart: ./rke2 + releaseName: rke2-mgmt + valuesFiles: + - values.yaml + name: fleet.yaml + - content: |- + apiVersion: v2 + name: rke2-cluster + description: RKE2 cluster designed for usage directly on Harvester + + type: application + version: 0.1.1 + appVersion: 0.1.1 + name: rke2/Chart.yaml + - content: "{{- range $i := until (.Values.control_plane.node_count | int) }}\n---\napiVersion: + v1\nkind: Secret\nmetadata:\n name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit\n + \ namespace: {{ $.Values.cluster_namespace }}\nstringData:\n userdata: |\n + \ #cloud-config\n {{- if $.Values.vm.qemu_agent_install }}\n package_update: + true\n packages:\n - qemu-guest-agent\n {{- end }}\n write_files: + \n {{- if $.Values.control_plane.files }}\n{{ $.Values.control_plane.files + | toYaml | indent 4 }}\n {{- end }}\n - path: /etc/rancher/rke2/config.yaml\n + \ owner: root\n content: |\n token: {{ 
$.Values.shared_token + }}\n {{- if ne $i 0 }}\n server: https://{{ $.Values.control_plane.vip + }}:9345\n {{- end }}\n system-default-registry: {{ $.Values.system_default_registry + }}\n tls-san:\n - {{ $.Values.cluster_name }}-cp-{{ $i }}\n + \ - {{ $.Values.control_plane.vip }}\n secrets-encryption: true\n + \ write-kubeconfig-mode: 0640\n use-service-account-credentials: + true\n {{- if hasKey $.Values \"registry_config\" }}\n - path: /etc/rancher/rke2/registries.yaml\n + \ owner: root\n content: |-\n{{ $.Values.registry_config | toYaml | + indent 8 }}\n {{- end }}\n - path: /etc/hosts\n owner: root\n content: + |\n 127.0.0.1 localhost\n 127.0.0.1 {{$.Values.cluster_name }}-cp-{{ + $i }}\n runcmd:\n {{- if $.Values.vm.qemu_agent_enable }}\n - - systemctl\n + \ - enable\n - '--now'\n - qemu-guest-agent.service\n {{- end + }}\n {{- if not $.Values.vm.airgapped_image }}\n - mkdir -p /var/lib/rancher/rke2-artifacts + && wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh\n + \ {{- end}}\n - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} /var/lib/rancher/install.sh\n + \ - systemctl enable rke2-server.service\n - useradd -r -c \"etcd user\" + -s /sbin/nologin -M etcd -U\n - systemctl start rke2-server.service\n ssh_authorized_keys: + \n - {{ $.Values.ssh_pub_key }}\n {{- if ne $.Values.control_plane.ipam + \"dhcp\" }}\n {{- if hasKey $.Values.control_plane \"network\" }}\n networkdata: + |\n{{ index $.Values.control_plane.network $i | indent 4 }}\n {{- end}}\n {{- + else}}\n networkdata: \"\"\n {{- end}}\n{{- end}}" + name: rke2/templates/rke2_cp_secret.yaml + - content: |- + {{- range $i := until (.Values.control_plane.node_count | int) }} + --- + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: {{ $.Values.cluster_name }}-cp-disk-{{ $i }} + namespace: {{ $.Values.cluster_namespace }} + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ $.Values.control_plane.node_disk_gb }}Gi + storageClassName: {{ $.Values.storage.class }} + volumeMode: Block + --- + apiVersion: kubevirt.io/v1 + kind: VirtualMachine + metadata: + namespace: {{ $.Values.cluster_namespace }} + annotations: + # harvesterhci.io/volumeClaimTemplates: | + # [{"metadata":{"name":"{{ $.Values.cluster_name }}-cp-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"{{ $.Values.control_plane.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}] + # network.harvesterhci.io/ips: '[]' + labels: + harvesterhci.io/creator: harvester + harvesterhci.io/os: {{ $.Values.vm.os }} + name: {{ $.Values.cluster_name }}-cp-{{ $i }} + finalizers: + - harvesterhci.io/VMController.UnsetOwnerOfPVCs + spec: + runStrategy: RerunOnFailure + template: + metadata: + annotations: {} + labels: + harvesterhci.io/vmName: {{ $.Values.cluster_name }}-cp-{{ $i }} + spec: + domain: + machine: + type: '' + cpu: + cores: {{ $.Values.control_plane.cpu_count }} + sockets: 1 + threads: 1 + devices: + interfaces: + - bridge: {} + model: virtio + name: default + disks: + - name: disk-0 + disk: + bus: virtio + bootOrder: 1 + - name: cloudinitdisk + disk: + bus: virtio + hostDevices: [] + resources: + limits: + memory: {{ $.Values.control_plane.memory_gb }}Gi + cpu: {{ $.Values.control_plane.cpu_count }} + features: + acpi: + 
enabled: {{ $.Values.vm.uefi_enabled }} + firmware: + bootloader: + efi: + secureBoot: false + evictionStrategy: LiveMigrate + hostname: {{ $.Values.cluster_name }}-cp-{{ $i }} + networks: + - name: default + multus: + networkName: default/{{ $.Values.network_name }} + volumes: + - name: disk-0 + persistentVolumeClaim: + claimName: {{ $.Values.cluster_name }}-cp-disk-{{ $i }} + - name: cloudinitdisk + cloudInitNoCloud: + secretRef: + name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit + networkDataSecretRef: + name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit + affinity: {} + terminationGracePeriodSeconds: 120 + {{- end }} + name: rke2/templates/rke2_cp_vm.yaml + - content: |- + --- + apiVersion: loadbalancer.harvesterhci.io/v1beta1 + kind: IPPool + metadata: + name: {{ $.Values.cluster_name }}-pool + spec: + ranges: + - gateway: {{ .Values.control_plane.loadbalancer_gateway }} + rangeEnd: {{ .Values.control_plane.vip }} + rangeStart: {{ .Values.control_plane.vip }} + subnet: {{ .Values.control_plane.loadbalancer_subnet }} + selector: {} + --- + apiVersion: loadbalancer.harvesterhci.io/v1beta1 + kind: LoadBalancer + metadata: + name: {{ .Values.cluster_name }}-lb + namespace: default + spec: + healthCheck: + failureThreshold: 2 + port: 6443 + successThreshold: 3 + timeoutSeconds: 5 + periodSeconds: 5 + ipam: pool + ipPool: {{ .Values.cluster_name }}-pool + listeners: + - name: k8s-api + port: 6443 + protocol: TCP + backendPort: 6443 + - name: ingress + port: 443 + protocol: TCP + backendPort: 443 + - name: join + port: 9345 + protocol: TCP + backendPort: 9345 + workloadType: vm + backendServerSelector: + harvesterhci.io/vmName: + {{- range $i := until (.Values.control_plane.node_count | int)}} + - {{ $.Values.cluster_name }}-cp-{{ $i }} + {{- end}} + name: rke2/templates/rke2_lb.yaml + - content: "{{- range $i := until (.Values.worker.node_count | int) }}\n---\napiVersion: + v1\nkind: Secret\nmetadata:\n name: {{ $.Values.cluster_name }}-worker-{{ $i + }}-cloudinit\n namespace: {{ $.Values.cluster_namespace }}\nstringData:\n userdata: + |\n #cloud-config\n {{- if $.Values.vm.qemu_agent_install }}\n package_update: + true\n packages:\n - qemu-guest-agent\n {{- end }}\n write_files: + \n {{- if $.Values.worker.files }}\n{{ $.Values.worker.files | toYaml | indent + 4 }}\n {{- end }}\n - path: /etc/rancher/rke2/config.yaml\n owner: + root\n content: |\n token: {{ $.Values.shared_token }}\n {{- + if ne $i 0 }}\n server: https://{{ $.Values.control_plane.vip }}:9345\n + \ {{- end }}\n system-default-registry: {{ $.Values.system_default_registry + }}\n secrets-encryption: true\n write-kubeconfig-mode: 0640\n + \ use-service-account-credentials: true\n {{- if hasKey $.Values \"registry_config\" + }}\n - path: /etc/rancher/rke2/registries.yaml\n owner: root\n content: + |-\n{{ $.Values.registry_config | toYaml | indent 8 }}\n {{- end }}\n - + path: /etc/hosts\n owner: root\n content: |\n 127.0.0.1 localhost\n + \ 127.0.0.1 {{$.Values.cluster_name }}-worker-{{ $i }}\n runcmd:\n + \ {{- if $.Values.vm.qemu_agent_enable }}\n - - systemctl\n - enable\n + \ - '--now'\n - qemu-guest-agent.service\n {{- end }}\n {{- if + not $.Values.vm.airgapped_image }}\n - mkdir -p /var/lib/rancher/rke2-artifacts + && wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh\n + \ {{- end}}\n - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} INSTALL_RKE2_TYPE=\"agent\" + /var/lib/rancher/install.sh\n - systemctl 
enable rke2-agent.service\n    -
+    systemctl start rke2-agent.service\n    ssh_authorized_keys: \n    - {{ $.Values.ssh_pub_key
+    }}\n  {{- if ne $.Values.worker.ipam \"dhcp\" }}\n  {{- if hasKey $.Values.worker
+    \"network\" }}\n  networkdata: |\n{{ index $.Values.worker.network $i | indent
+    4 }}\n  {{- end}}\n  {{- else}}\n  networkdata: \"\"\n  {{- end}}\n{{- end}}"
+    name: rke2/templates/rke2_worker_secret.yaml
+  - content: |-
+      {{- range $i := until (.Values.worker.node_count | int) }}
+      ---
+      apiVersion: kubevirt.io/v1
+      kind: VirtualMachine
+      metadata:
+        namespace: {{ $.Values.cluster_namespace }}
+        annotations:
+          harvesterhci.io/volumeClaimTemplates: |
+            [{"metadata":{"name":"{{ $.Values.cluster_name }}-worker-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"{{ $.Values.worker.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
+          network.harvesterhci.io/ips: '[]'
+        labels:
+          harvesterhci.io/creator: harvester
+          harvesterhci.io/os: {{ $.Values.vm.os }}
+        name: {{ $.Values.cluster_name }}-worker-{{ $i }}
+        finalizers:
+          - harvesterhci.io/VMController.UnsetOwnerOfPVCs
+      spec:
+        runStrategy: RerunOnFailure
+        template:
+          metadata:
+            annotations: {}
+            labels:
+              harvesterhci.io/vmName: {{ $.Values.cluster_name }}-worker-{{ $i }}
+          spec:
+            domain:
+              machine:
+                type: ''
+              cpu:
+                cores: {{ $.Values.worker.cpu_count }}
+                sockets: 1
+                threads: 1
+              devices:
+                interfaces:
+                  - bridge: {}
+                    model: virtio
+                    name: default
+                disks:
+                  - name: disk-0
+                    disk:
+                      bus: virtio
+                    bootOrder: 1
+                  - name: cloudinitdisk
+                    disk:
+                      bus: virtio
+                hostDevices: []
+              resources:
+                limits:
+                  memory: {{ $.Values.worker.memory_gb }}Gi
+                  cpu: {{ $.Values.worker.cpu_count }}
+              features:
+                acpi:
+                  enabled: {{ $.Values.vm.uefi_enabled }}
+              firmware:
+                bootloader:
+                  efi:
+                    secureBoot: false
+            evictionStrategy: LiveMigrate
+            hostname: {{ $.Values.cluster_name }}-worker-{{ $i }}
+            networks:
+              - name: default
+                multus:
+                  networkName: default/{{ $.Values.network_name }}
+            volumes:
+              - name: disk-0
+                persistentVolumeClaim:
+                  claimName: {{ $.Values.cluster_name }}-worker-disk-{{ $i }}
+              - name: cloudinitdisk
+                cloudInitNoCloud:
+                  secretRef:
+                    name: {{ $.Values.cluster_name }}-worker-{{ $i }}-cloudinit
+                  networkData: ""
+            affinity: {}
+            terminationGracePeriodSeconds: 120
+      {{- end }}
+    name: rke2/templates/rke2_worker_vm.yaml
+  - content: "cluster_name: mycluster\ncluster_namespace: default\n\nshared_token:
+    insecuretoken\nsystem_default_registry: \"\"          #! empty value: use embedded
+    default\n                                     #! non-empty value: use as registry
+    to source rke2 runtime image from\n                                     #! if your
+    VM image contains the tarballs for RKE2, it will use those first\nrke2_version:
+    v1.26.10+rke2r2\n\nssh_pub_key: \"\"                      #! the public ssh key
+    to inject onto each node, required if you want to fetch a kubeconfig\n\n# registry_config:\n#
+    \  configs:\n#     \"rgcrprod.azurecr.us\":\n#       auth:\n#         username:
+    test\n#         password: test\n\nvm:\n  image_namespace: default           #!
+    namespace in your harvester cluster containing the vm base image\n  image: ubuntu
+    \                     #! name of base vm image to use for your RKE2 nodes\n  os:
+    linux\n  distro: ubuntu                     #! flag used for specific cloud-init
+    code tied to Ubuntu vs others (netplan)\n  uefi_enabled: true\n  qemu_agent_install:
+    true           #! flag for installation of the qemu-agent service (Requires internet)\n
+    \  qemu_agent_enable: true            #!
flag for enabling the qemu-agent\n airgapped_image: + false #! flag to alert helm that your VM image already has the RKE2 + install script (and does not need to download it)\n\n\nnetwork_name: host\n\ncontrol_plane:\n + \ node_count: 1\n cpu_count: 4\n memory_gb: 8\n node_disk_gb: 40\n loadbalancer_gateway: + 10.10.0.1\n loadbalancer_subnet: 10.10.0.0/24\n files: []\n # files:\n # + - path: /tmp/test\n # owner: root\n # content: |\n # created a file\n\n + \ vip: #! this is the VIP for the Harvester LoadBalancer + object, ensure it is a routable IP\n ipam: dhcp #! this + can be dhcp or static, static requires an equal amount of cloud-init network-data + entries\n\n # network:\n # - | #! ubuntu example\n # network:\n # version: + 2\n # renderer: networkd\n # ethernets:\n # enp1s0:\n # dhcp4: + no\n # addresses: [ \"10.10.0.6/24\" ]\n # gateway4: 10.10.0.1\n + \ # nameservers:\n # addresses: \n # - 10.10.0.1\n\nworker:\n + \ node_count: 1\n cpu_count: 4\n memory_gb: 8\n node_disk_gb: 40\n files: + []\n # files:\n # - path: /tmp/test\n # owner: root\n # content: |\n + \ # created a file\n\n ipam: dhcp #! this can be dhcp + or static, static requires an equal amount of cloud-init network-data entries\n\n + \ # network:\n # - |\n # network:\n # version: 2\n # renderer: + networkd\n # ethernets:\n # enp1s0:\n # dhcp4: no\n # + \ addresses: [ \"10.10.0.20/24\" ]\n # gateway4: 10.10.0.1\n + \ # nameservers:\n # addresses: \n # - 10.10.0.1\n" + name: rke2/values.yaml + - content: "cluster_name: rke2-mgmt\nsystem_default_registry: \"\"\n\nrke2_version: + v1.29.6+rke2r1\n\nvm:\n image: ubuntu\n qemu_agent_install: true \n + \ qemu_agent_enable: true \n airgapped_image: false \nnetwork_name: + lab-workload\nssh_pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDK3vpDMfNKbXTkpOwC77k5xvEpUAcNyJa6pYT17YMbzvHuugSJRiouLElDtpqktto6plkDdrTRXnkDA3aVxUycCl/4IrnCEehlg8LGgFxKASNeXQYL0URIWRDicyJaolg5bFdcu3gYTA0JBtApiebrml6bj9cJGnS8lqRK9wmWIFv5lPICcrZMsw1AIRhghGI5BupUnttD+muHspAiGfjTbiiCvKo3fLmEMQ9pt/46wQuPbzOCVChpJByVG9AKO9IpdkOGgKeuy2y98ZxJIHBAx4B49jDfA8NNfyEBIdgiIvlv6QXgjFbazI5buLYM/RK36kf9JjYNBySZJuA3VMbHnWmWvZYBQUA6ypVMc4Wzvd3hhFmNQn1W+NEHl6v+bCDeo5QIv5dkpIoDgJd8CvWQ42bb2bi7zyO32v2zfaW03eDCeopFAKditMPhjqai0S2W4LRt7dRKEOCvUqPFYqZ99nBk1mmTWG8Gpp7VA/+shn171Yc/wDCwBcEyciqOYNtnW55O3eCiBHsnBcEFKy80zHJ1jckDSluypwBrsooYV5WKS8O+jqGyYfdruJ8oUCPw72b0JHs5AmFCRuhzOU6cZP6Ynghs1SkdVtq722uFjmDUR0X8+hoIZDEWutw6+91YhwnodA3MmGHtInlY+URqdz6TltOMP2X2vSMohnh2zQ==\n\nregistry_config:\n + \ configs:\n \"rgcrprod.azurecr.us\":\n auth:\n username: test\n + \ password: test\n\ncontrol_plane:\n vip: 10.2.0.20 \n loadbalancer_gateway: + 10.2.0.1\n loadbalancer_subnet: 10.2.0.0/24\n \n node_count: 3 \n cpu_count: + 8\n memory_gb: 16\n\n ipam: static\n network:\n - |\n network:\n version: + 2\n renderer: networkd\n ethernets:\n enp1s0:\n dhcp4: + no\n addresses: [ \"10.2.0.21/24\" ]\n gateway4: 10.2.0.1\n + \ nameservers:\n addresses: \n - 10.2.0.1\n - + |\n network:\n version: 2\n renderer: networkd\n ethernets:\n + \ enp1s0:\n dhcp4: no\n addresses: [ \"10.2.0.22/24\" + ]\n gateway4: 10.2.0.1\n nameservers:\n addresses: + \n - 10.2.0.1\n - |\n network:\n version: 2\n renderer: + networkd\n ethernets:\n enp1s0:\n dhcp4: no\n addresses: + [ \"10.2.0.23/24\" ]\n gateway4: 10.2.0.1\n nameservers:\n + \ addresses: \n - 10.2.0.1\n files:\n - path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml\n + \ owner: root\n content: |\n apiVersion: helm.cattle.io/v1\n kind: + HelmChart\n metadata:\n name: cert-manager\n namespace: default + \ \n 
spec:\n bootstrap: true\n targetNamespace: cert-manager\n + \ createNamespace: true\n valuesContent: |-\n securityContext:\n + \ runAsNonRoot: true\n crds:\n enabled: true\n + \ version: v1.16.1\n repo: https://charts.jetstack.io\n chart: + cert-manager\n - path: /var/lib/rancher/rke2/server/manifests/rancher.yaml\n + \ owner: root\n content: |\n apiVersion: helm.cattle.io/v1\n kind: + HelmChart\n metadata:\n name: rancher\n namespace: default + \ \n spec:\n bootstrap: false\n targetNamespace: cattle-system\n + \ createNamespace: true\n set:\n hostname: rancher.lab.sienarfleet.systems\n + \ replicas: 3\n bootstrapPassword: admin\n valuesContent: + |-\n global:\n cattle:\n psp:\n enabled: + false\n ingress:\n tls:\n source: rancher\n + \ repo: https://releases.rancher.com/server-charts/stable\n chart: + rancher\n version: v2.10.1\nworker:\n node_count: 0" + name: values.yaml + targetRestrictions: + - clusterName: local + targets: + - clusterName: local + ignore: {} diff --git a/deploy/rancher/helm/rancher_values_dhcp.yaml b/deploy/rancher/helm/rancher_values_dhcp.yaml new file mode 100644 index 0000000..3eda166 --- /dev/null +++ b/deploy/rancher/helm/rancher_values_dhcp.yaml @@ -0,0 +1,101 @@ +cluster_name: rke2-rancher +cluster_namespace: vanderlande +control_plane: + cpu_count: 4 + files: + - content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cert-manager + #namespace: default + spec: + bootstrap: true + targetNamespace: cert-manager + createNamespace: true + valuesContent: |- + securityContext: + runAsNonRoot: true + crds: + enabled: true + version: v1.16.1 + repo: https://charts.jetstack.io + chart: cert-manager + owner: root + path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml + - content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: rancher + #namespace: default + spec: + bootstrap: false + targetNamespace: cattle-system + createNamespace: true + set: + hostname: rancher-mgmt.product.lan + replicas: 3 + bootstrapPassword: admin + valuesContent: |- + global: + cattle: + psp: + enabled: false + ingress: + tls: + source: rancher + repo: https://releases.rancher.com/server-charts/stable + chart: rancher + version: v2.12.3 + owner: root + path: /var/lib/rancher/rke2/server/manifests/rancher.yaml + ipam: dhcp + loadbalancer_gateway: 172.27.27.1 + loadbalancer_subnet: 172.27.27.0/24 + memory_gb: 12 + network: + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: yes + + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: yes + + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: yes + node_count: 3 + vip: 172.27.27.40 +network_name: vm-lan +registry_config: + configs: + rgcrprod.azurecr.us: + auth: + password: test + username: test +rke2_version: v1.33.4+rke2r1 +ssh_pub_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil +system_default_registry: "" +vm: + airgapped_image: false + image: noble-server-cloudimg-amd64.img + qemu_agent_enable: true + qemu_agent_install: true +worker: + node_count: 0 +storage: + class: longhorn-image-t4n82 # StorageClass for image noble-server-cloudimg-amd64.img \ No newline at end of file diff --git a/deploy/rancher/helm/rancher_values_static.yaml b/deploy/rancher/helm/rancher_values_static.yaml new file mode 100644 index 0000000..874cbc8 --- /dev/null +++ b/deploy/rancher/helm/rancher_values_static.yaml @@ -0,0 +1,117 @@ +cluster_name: rke2-mgmt +cluster_namespace: 
vanderlande +control_plane: + cpu_count: 4 + files: + - content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cert-manager + #namespace: default + spec: + bootstrap: true + targetNamespace: cert-manager + createNamespace: true + valuesContent: |- + securityContext: + runAsNonRoot: true + crds: + enabled: true + version: v1.16.1 + repo: https://charts.jetstack.io + chart: cert-manager + owner: root + path: /var/lib/rancher/rke2/server/manifests/certmanager.yaml + - content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: rancher + #namespace: default + spec: + bootstrap: false + targetNamespace: cattle-system + createNamespace: true + set: + hostname: rancher-mgmt.product.lan + replicas: 3 + bootstrapPassword: admin + valuesContent: |- + global: + cattle: + psp: + enabled: false + ingress: + tls: + source: rancher + repo: https://releases.rancher.com/server-charts/stable + chart: rancher + version: v2.12.3 + owner: root + path: /var/lib/rancher/rke2/server/manifests/rancher.yaml + ipam: static + loadbalancer_gateway: 172.27.27.1 + loadbalancer_subnet: 172.27.27.0/24 + memory_gb: 12 + network: + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: no + addresses: [ "172.22.19.41/24" ] + gateway4: 172.22.19.1 + nameservers: + addresses: + - 172.22.19.15 + - 172.22.19.16 + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: no + addresses: [ "172.22.19.42/24" ] + gateway4: 172.22.19.1 + nameservers: + addresses: + - 172.22.19.15 + - 172.22.19.16 + - | + network: + version: 2 + renderer: networkd + ethernets: + enp1s0: + dhcp4: no + addresses: [ "172.22.19.43/24" ] + gateway4: 172.22.19.1 + nameservers: + addresses: + - 172.22.19.15 + - 172.22.19.16 + node_count: 3 + vip: 172.27.27.40 +network_name: vm-lan +registry_config: + configs: + rgcrprod.azurecr.us: + auth: + password: test + username: test +rke2_version: v1.33.4+rke2r1 +ssh_pub_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPyW9YbYPE3efCdHMBgnP8AeVfs5Lw8MBCLhXuteliil +system_default_registry: "" +vm: + airgapped_image: false + image: noble-server-cloudimg-amd64.img + qemu_agent_enable: true + qemu_agent_install: true +worker: + node_count: 0 +storage: + class: longhorn-image-t4n82 # StorageClass for image noble-server-cloudimg-amd64.img \ No newline at end of file diff --git a/deploy/rancher/helm/rke2/Chart.yaml b/deploy/rancher/helm/rke2/Chart.yaml new file mode 100644 index 0000000..3d29afd --- /dev/null +++ b/deploy/rancher/helm/rke2/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: rke2-cluster +description: RKE2 cluster designed for usage directly on Harvester + +type: application +version: 0.1.1 +appVersion: 0.1.1 \ No newline at end of file diff --git a/deploy/rancher/helm/rke2/templates/rke2_cp_secret.yaml b/deploy/rancher/helm/rke2/templates/rke2_cp_secret.yaml new file mode 100644 index 0000000..8649874 --- /dev/null +++ b/deploy/rancher/helm/rke2/templates/rke2_cp_secret.yaml @@ -0,0 +1,69 @@ +{{- range $i := until (.Values.control_plane.node_count | int) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit + namespace: {{ $.Values.cluster_namespace }} +stringData: + userdata: | + #cloud-config + {{- if $.Values.vm.qemu_agent_install }} + package_update: true + packages: + - qemu-guest-agent + {{- end }} + write_files: + {{- if $.Values.control_plane.files }} +{{ $.Values.control_plane.files | toYaml | indent 4 }} + {{- end }} + - path: /etc/rancher/rke2/config.yaml + 
owner: root + content: | + token: {{ $.Values.shared_token }} + {{- if ne $i 0 }} + server: https://{{ $.Values.control_plane.vip }}:9345 + {{- end }} + system-default-registry: {{ $.Values.system_default_registry }} + tls-san: + - {{ $.Values.cluster_name }}-cp-{{ $i }} + - {{ $.Values.control_plane.vip }} + secrets-encryption: true + write-kubeconfig-mode: 0640 + use-service-account-credentials: true + {{- if hasKey $.Values "registry_config" }} + - path: /etc/rancher/rke2/registries.yaml + owner: root + content: |- +{{ $.Values.registry_config | toYaml | indent 8 }} + {{- end }} + - path: /etc/hosts + owner: root + content: | + 127.0.0.1 localhost + 127.0.0.1 {{$.Values.cluster_name }}-cp-{{ $i }} + runcmd: + {{- if $.Values.vm.qemu_agent_enable }} + - - systemctl + - enable + - '--now' + - qemu-guest-agent.service + {{- end }} + {{- if not $.Values.vm.airgapped_image }} + - mkdir -p /var/lib/rancher/rke2-artifacts && wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh + {{- end}} + - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} /var/lib/rancher/install.sh + - systemctl enable rke2-server.service + - useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U + - systemctl start rke2-server.service + ssh_authorized_keys: + - {{ $.Values.ssh_pub_key }} + {{- if ne $.Values.control_plane.ipam "dhcp" }} + {{- if hasKey $.Values.control_plane "network" }} + networkdata: | +{{ index $.Values.control_plane.network $i | indent 4 }} + {{- end}} + {{- else}} + networkdata: "" + {{- end}} +{{- end}} \ No newline at end of file diff --git a/deploy/rancher/helm/rke2/templates/rke2_cp_vm.yaml b/deploy/rancher/helm/rke2/templates/rke2_cp_vm.yaml new file mode 100644 index 0000000..0e8bd8d --- /dev/null +++ b/deploy/rancher/helm/rke2/templates/rke2_cp_vm.yaml @@ -0,0 +1,89 @@ +{{- range $i := until (.Values.control_plane.node_count | int) }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ $.Values.cluster_name }}-cp-disk-{{ $i }} + namespace: {{ $.Values.cluster_namespace }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ $.Values.control_plane.node_disk_gb }}Gi + storageClassName: {{ $.Values.storage.class }} + volumeMode: Block +--- +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + namespace: {{ $.Values.cluster_namespace }} + annotations: + # harvesterhci.io/volumeClaimTemplates: | + # [{"metadata":{"name":"{{ $.Values.cluster_name }}-cp-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"{{ $.Values.control_plane.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}] + # network.harvesterhci.io/ips: '[]' + labels: + harvesterhci.io/creator: harvester + harvesterhci.io/os: {{ $.Values.vm.os }} + name: {{ $.Values.cluster_name }}-cp-{{ $i }} + finalizers: + - harvesterhci.io/VMController.UnsetOwnerOfPVCs +spec: + runStrategy: RerunOnFailure + template: + metadata: + annotations: {} + labels: + harvesterhci.io/vmName: {{ $.Values.cluster_name }}-cp-{{ $i }} + spec: + domain: + machine: + type: '' + cpu: + cores: {{ $.Values.control_plane.cpu_count }} + sockets: 1 + threads: 1 + devices: + interfaces: + - bridge: {} + model: virtio + name: default + disks: + - name: disk-0 + disk: + bus: virtio + bootOrder: 1 + - name: cloudinitdisk + disk: + 
bus: virtio + hostDevices: [] + resources: + limits: + memory: {{ $.Values.control_plane.memory_gb }}Gi + cpu: {{ $.Values.control_plane.cpu_count }} + features: + acpi: + enabled: {{ $.Values.vm.uefi_enabled }} + firmware: + bootloader: + efi: + secureBoot: false + evictionStrategy: LiveMigrate + hostname: {{ $.Values.cluster_name }}-cp-{{ $i }} + networks: + - name: default + multus: + networkName: {{ $.Values.cluster_namespace }}/{{ $.Values.network_name }} + volumes: + - name: disk-0 + persistentVolumeClaim: + claimName: {{ $.Values.cluster_name }}-cp-disk-{{ $i }} + - name: cloudinitdisk + cloudInitNoCloud: + secretRef: + name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit + networkDataSecretRef: + name: {{ $.Values.cluster_name }}-cp-{{ $i }}-cloudinit + affinity: {} + terminationGracePeriodSeconds: 120 +{{- end }} \ No newline at end of file diff --git a/deploy/rancher/helm/rke2/templates/rke2_lb.yaml b/deploy/rancher/helm/rke2/templates/rke2_lb.yaml new file mode 100644 index 0000000..0307ff6 --- /dev/null +++ b/deploy/rancher/helm/rke2/templates/rke2_lb.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: loadbalancer.harvesterhci.io/v1beta1 +kind: IPPool +metadata: + name: {{ $.Values.cluster_name }}-pool +spec: + ranges: + - gateway: {{ .Values.control_plane.loadbalancer_gateway }} + rangeEnd: {{ .Values.control_plane.vip }} + rangeStart: {{ .Values.control_plane.vip }} + subnet: {{ .Values.control_plane.loadbalancer_subnet }} + selector: {} +--- +apiVersion: loadbalancer.harvesterhci.io/v1beta1 +kind: LoadBalancer +metadata: + name: {{ .Values.cluster_name }}-lb + #namespace: default +spec: + healthCheck: + failureThreshold: 2 + port: 6443 + successThreshold: 3 + timeoutSeconds: 5 + periodSeconds: 5 + ipam: pool + ipPool: {{ .Values.cluster_name }}-pool + listeners: + - name: k8s-api + port: 6443 + protocol: TCP + backendPort: 6443 + - name: ingress + port: 443 + protocol: TCP + backendPort: 443 + - name: join + port: 9345 + protocol: TCP + backendPort: 9345 + workloadType: vm + backendServerSelector: + harvesterhci.io/vmName: + {{- range $i := until (.Values.control_plane.node_count | int)}} + - {{ $.Values.cluster_name }}-cp-{{ $i }} + {{- end}} \ No newline at end of file diff --git a/deploy/rancher/helm/rke2/templates/rke2_worker_secret.yaml b/deploy/rancher/helm/rke2/templates/rke2_worker_secret.yaml new file mode 100644 index 0000000..637cf63 --- /dev/null +++ b/deploy/rancher/helm/rke2/templates/rke2_worker_secret.yaml @@ -0,0 +1,66 @@ +{{- range $i := until (.Values.worker.node_count | int) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $.Values.cluster_name }}-worker-{{ $i }}-cloudinit + namespace: {{ $.Values.cluster_namespace }} +stringData: + userdata: | + #cloud-config + {{- if $.Values.vm.qemu_agent_install }} + package_update: true + packages: + - qemu-guest-agent + {{- end }} + write_files: + {{- if $.Values.worker.files }} +{{ $.Values.worker.files | toYaml | indent 4 }} + {{- end }} + - path: /etc/rancher/rke2/config.yaml + owner: root + content: | + token: {{ $.Values.shared_token }} + {{- if ne $i 0 }} + server: https://{{ $.Values.control_plane.vip }}:9345 + {{- end }} + system-default-registry: {{ $.Values.system_default_registry }} + secrets-encryption: true + write-kubeconfig-mode: 0640 + use-service-account-credentials: true + {{- if hasKey $.Values "registry_config" }} + - path: /etc/rancher/rke2/registries.yaml + owner: root + content: |- +{{ $.Values.registry_config | toYaml | indent 8 }} + {{- end }} + - path: /etc/hosts + owner: root + content: 
|
+        127.0.0.1 localhost
+        127.0.0.1 {{$.Values.cluster_name }}-worker-{{ $i }}
+    runcmd:
+    {{- if $.Values.vm.qemu_agent_enable }}
+    - - systemctl
+      - enable
+      - '--now'
+      - qemu-guest-agent.service
+    {{- end }}
+    {{- if not $.Values.vm.airgapped_image }}
+    #- mkdir -p /var/lib/rancher/rke2-artifacts && wget https://get.rke2.io -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh
+    - mkdir -p /var/lib/rancher/rke2-artifacts && wget https://raw.githubusercontent.com/rancher/rke2/refs/heads/master/install.sh -O /var/lib/rancher/install.sh && chmod +x /var/lib/rancher/install.sh
+    {{- end}}
+    - INSTALL_RKE2_VERSION={{ $.Values.rke2_version }} INSTALL_RKE2_TYPE="agent" /var/lib/rancher/install.sh
+    - systemctl enable rke2-agent.service
+    - systemctl start rke2-agent.service
+    ssh_authorized_keys:
+    - {{ $.Values.ssh_pub_key }}
+  {{- if ne $.Values.worker.ipam "dhcp" }}
+  {{- if hasKey $.Values.worker "network" }}
+  networkdata: |
+{{ index $.Values.worker.network $i | indent 4 }}
+  {{- end}}
+  {{- else}}
+  networkdata: ""
+  {{- end}}
+{{- end}}
\ No newline at end of file
diff --git a/deploy/rancher/helm/rke2/templates/rke2_worker_vm.yaml b/deploy/rancher/helm/rke2/templates/rke2_worker_vm.yaml
new file mode 100644
index 0000000..44dd352
--- /dev/null
+++ b/deploy/rancher/helm/rke2/templates/rke2_worker_vm.yaml
@@ -0,0 +1,74 @@
+{{- range $i := until (.Values.worker.node_count | int) }}
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+  namespace: {{ $.Values.cluster_namespace }}
+  annotations:
+    harvesterhci.io/volumeClaimTemplates: |
+      [{"metadata":{"name":"{{ $.Values.cluster_name }}-worker-disk-{{ $i }}","annotations":{"harvesterhci.io/imageId":"{{ $.Values.vm.image_namespace }}/{{ $.Values.vm.image }}","helm.app":"rke2"}},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"{{ $.Values.worker.node_disk_gb }}Gi"}},"volumeMode":"Block","storageClassName":"{{ $.Values.storage.class }}"}}]
+    network.harvesterhci.io/ips: '[]'
+  labels:
+    harvesterhci.io/creator: harvester
+    harvesterhci.io/os: {{ $.Values.vm.os }}
+  name: {{ $.Values.cluster_name }}-worker-{{ $i }}
+  finalizers:
+    - harvesterhci.io/VMController.UnsetOwnerOfPVCs
+spec:
+  runStrategy: RerunOnFailure
+  template:
+    metadata:
+      annotations: {}
+      labels:
+        harvesterhci.io/vmName: {{ $.Values.cluster_name }}-worker-{{ $i }}
+    spec:
+      domain:
+        machine:
+          type: ''
+        cpu:
+          cores: {{ $.Values.worker.cpu_count }}
+          sockets: 1
+          threads: 1
+        devices:
+          interfaces:
+            - bridge: {}
+              model: virtio
+              name: default
+          disks:
+            - name: disk-0
+              disk:
+                bus: virtio
+              bootOrder: 1
+            - name: cloudinitdisk
+              disk:
+                bus: virtio
+          hostDevices: []
+        resources:
+          limits:
+            memory: {{ $.Values.worker.memory_gb }}Gi
+            cpu: {{ $.Values.worker.cpu_count }}
+        features:
+          acpi:
+            enabled: {{ $.Values.vm.uefi_enabled }}
+        firmware:
+          bootloader:
+            efi:
+              secureBoot: false
+      evictionStrategy: LiveMigrate
+      hostname: {{ $.Values.cluster_name }}-worker-{{ $i }}
+      networks:
+        - name: default
+          multus:
+            networkName: {{ $.Values.cluster_namespace }}/{{ $.Values.network_name }}
+      volumes:
+        - name: disk-0
+          persistentVolumeClaim:
+            claimName: {{ $.Values.cluster_name }}-worker-disk-{{ $i }}
+        - name: cloudinitdisk
+          cloudInitNoCloud:
+            secretRef:
+              name: {{ $.Values.cluster_name }}-worker-{{ $i }}-cloudinit
+            networkData: ""
+      affinity: {}
+      terminationGracePeriodSeconds: 120
+{{- end }}
\ No newline at end of file
diff --git a/deploy/rancher/helm/rke2/values.yaml b/deploy/rancher/helm/rke2/values.yaml
new file mode 100644
index 0000000..8704405
--- /dev/null
+++ b/deploy/rancher/helm/rke2/values.yaml
@@ -0,0 +1,92 @@
+cluster_name: mycluster
+cluster_namespace: default
+
+shared_token: insecuretoken
+system_default_registry: ""          #! empty value: use embedded default
+                                     #! non-empty value: use as registry to source rke2 runtime image from
+                                     #! if your VM image contains the tarballs for RKE2, it will use those first
+rke2_version: v1.26.10+rke2r2
+
+ssh_pub_key: ""                      #! the public ssh key to inject onto each node, required if you want to fetch a kubeconfig
+
+# registry_config:
+#   configs:
+#     "rgcrprod.azurecr.us":
+#       auth:
+#         username: test
+#         password: test
+
+storage:
+  class: longhorn
+
+vm:
+  image_namespace: default           #! namespace in your harvester cluster containing the vm base image
+  image: ubuntu                      #! name of base vm image to use for your RKE2 nodes
+  os: linux
+  distro: ubuntu                     #! flag used for specific cloud-init code tied to Ubuntu vs others (netplan)
+  uefi_enabled: true
+  qemu_agent_install: true           #! flag for installation of the qemu-agent service (Requires internet)
+  qemu_agent_enable: true            #! flag for enabling the qemu-agent
+  airgapped_image: false             #! flag to alert helm that your VM image already has the RKE2 install script (and does not need to download it)
+
+
+network_name: host
+
+control_plane:
+  node_count: 1
+  cpu_count: 4
+  memory_gb: 8
+  node_disk_gb: 40
+  loadbalancer_gateway: 10.10.0.1
+  loadbalancer_subnet: 10.10.0.0/24
+  files: []
+  # files:
+  # - path: /tmp/test
+  #   owner: root
+  #   content: |
+  #     created a file
+
+  vip:                               #! this is the VIP for the Harvester LoadBalancer object, ensure it is a routable IP
+  ipam: dhcp                         #!
this can be dhcp or static, static requires an equal amount of cloud-init network-data entries + + # network: + # - | + # network: + # version: 2 + # renderer: networkd + # ethernets: + # enp1s0: + # dhcp4: no + # addresses: [ "10.10.0.20/24" ] + # gateway4: 10.10.0.1 + # nameservers: + # addresses: + # - 10.10.0.1 diff --git a/deploy/rig-operator/.devcontainer/devcontainer.json b/deploy/rig-operator/.devcontainer/devcontainer.json new file mode 100644 index 0000000..a3ab754 --- /dev/null +++ b/deploy/rig-operator/.devcontainer/devcontainer.json @@ -0,0 +1,25 @@ +{ + "name": "Kubebuilder DevContainer", + "image": "golang:1.24", + "features": { + "ghcr.io/devcontainers/features/docker-in-docker:2": {}, + "ghcr.io/devcontainers/features/git:1": {} + }, + + "runArgs": ["--network=host"], + + "customizations": { + "vscode": { + "settings": { + "terminal.integrated.shell.linux": "/bin/bash" + }, + "extensions": [ + "ms-kubernetes-tools.vscode-kubernetes-tools", + "ms-azuretools.vscode-docker" + ] + } + }, + + "onCreateCommand": "bash .devcontainer/post-install.sh" +} + diff --git a/deploy/rig-operator/.devcontainer/post-install.sh b/deploy/rig-operator/.devcontainer/post-install.sh new file mode 100644 index 0000000..67f3e97 --- /dev/null +++ b/deploy/rig-operator/.devcontainer/post-install.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -x + +curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH) +chmod +x ./kind +mv ./kind /usr/local/bin/kind + +curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/$(go env GOARCH) +chmod +x kubebuilder +mv kubebuilder /usr/local/bin/ + +KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt) +curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/$(go env GOARCH)/kubectl" +chmod +x kubectl +mv kubectl /usr/local/bin/kubectl + +docker network create -d=bridge --subnet=172.19.0.0/24 kind + +kind version +kubebuilder version +docker --version +go version +kubectl version --client diff --git a/deploy/rig-operator/.dockerignore b/deploy/rig-operator/.dockerignore new file mode 100644 index 0000000..9af8280 --- /dev/null +++ b/deploy/rig-operator/.dockerignore @@ -0,0 +1,11 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore everything by default and re-include only needed files +** + +# Re-include Go source files (but not *_test.go) +!**/*.go +**/*_test.go + +# Re-include Go module files +!go.mod +!go.sum diff --git a/deploy/rig-operator/.github/workflows/lint.yml b/deploy/rig-operator/.github/workflows/lint.yml new file mode 100644 index 0000000..4838c54 --- /dev/null +++ b/deploy/rig-operator/.github/workflows/lint.yml @@ -0,0 +1,23 @@ +name: Lint + +on: + push: + pull_request: + +jobs: + lint: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Run linter + uses: golangci/golangci-lint-action@v8 + with: + version: v2.5.0 diff --git a/deploy/rig-operator/.github/workflows/test-e2e.yml b/deploy/rig-operator/.github/workflows/test-e2e.yml new file mode 100644 index 0000000..4cdfb30 --- /dev/null +++ b/deploy/rig-operator/.github/workflows/test-e2e.yml @@ -0,0 +1,32 @@ +name: E2E Tests + +on: + push: + pull_request: + +jobs: + test-e2e: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + 
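+      # The step below installs whatever kind release is currently "latest"; for
+      # reproducible CI runs a pinned release may be preferable, e.g. (version is
+      # illustrative only):
+      #   curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.23.0/kind-linux-$(go env GOARCH)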
- name: Install the latest version of kind + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH) + chmod +x ./kind + sudo mv ./kind /usr/local/bin/kind + + - name: Verify kind installation + run: kind version + + - name: Running Test e2e + run: | + go mod tidy + make test-e2e diff --git a/deploy/rig-operator/.github/workflows/test.yml b/deploy/rig-operator/.github/workflows/test.yml new file mode 100644 index 0000000..fc2e80d --- /dev/null +++ b/deploy/rig-operator/.github/workflows/test.yml @@ -0,0 +1,23 @@ +name: Tests + +on: + push: + pull_request: + +jobs: + test: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Running Tests + run: | + go mod tidy + make test diff --git a/deploy/rig-operator/.gitignore b/deploy/rig-operator/.gitignore new file mode 100644 index 0000000..9f0f3a1 --- /dev/null +++ b/deploy/rig-operator/.gitignore @@ -0,0 +1,30 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin/* +Dockerfile.cross + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Go workspace file +go.work + +# Kubernetes Generated files - skip generated files, except for vendored files +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +.vscode +*.swp +*.swo +*~ + +# Kubeconfig might contain secrets +*.kubeconfig diff --git a/deploy/rig-operator/.golangci.yml b/deploy/rig-operator/.golangci.yml new file mode 100644 index 0000000..e5b21b0 --- /dev/null +++ b/deploy/rig-operator/.golangci.yml @@ -0,0 +1,52 @@ +version: "2" +run: + allow-parallel-runners: true +linters: + default: none + enable: + - copyloopvar + - dupl + - errcheck + - ginkgolinter + - goconst + - gocyclo + - govet + - ineffassign + - lll + - misspell + - nakedret + - prealloc + - revive + - staticcheck + - unconvert + - unparam + - unused + settings: + revive: + rules: + - name: comment-spacings + - name: import-shadowing + exclusions: + generated: lax + rules: + - linters: + - lll + path: api/* + - linters: + - dupl + - lll + path: internal/* + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/deploy/rig-operator/Dockerfile b/deploy/rig-operator/Dockerfile new file mode 100644 index 0000000..6466c48 --- /dev/null +++ b/deploy/rig-operator/Dockerfile @@ -0,0 +1,31 @@ +# Build the manager binary +FROM golang:1.24 AS builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the Go source (relies on .dockerignore to filter) +COPY . . + +# Build +# the GOARCH has no default value to allow the binary to be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. 
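+# A concrete cross-build can also be requested by hand via buildx; illustrative
+# invocation only (the repo's own entrypoint for this is `make docker-buildx`):
+#   docker buildx build --platform linux/arm64 -t controller:latest .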
+RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/deploy/rig-operator/Makefile b/deploy/rig-operator/Makefile new file mode 100644 index 0000000..207f05b --- /dev/null +++ b/deploy/rig-operator/Makefile @@ -0,0 +1,250 @@ +# Image URL to use all building/pushing image targets +IMG ?= controller:latest + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= docker + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + "$(CONTROLLER_GEN)" rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + "$(CONTROLLER_GEN)" object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet setup-envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell "$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'. +# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. 
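+# Both the kind binary and the cluster name can be overridden per invocation,
+# e.g. (illustrative): make test-e2e KIND=./bin/kind KIND_CLUSTER=my-e2e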
+
+# CertManager is installed by default; skip with:
+# - CERT_MANAGER_INSTALL_SKIP=true
+KIND_CLUSTER ?= deploy-test-e2e
+
+.PHONY: setup-test-e2e
+setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist
+	@command -v $(KIND) >/dev/null 2>&1 || { \
+	echo "Kind is not installed. Please install Kind manually."; \
+	exit 1; \
+	}
+	@case "$$($(KIND) get clusters)" in \
+	*"$(KIND_CLUSTER)"*) \
+	echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \
+	*) \
+	echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \
+	$(KIND) create cluster --name $(KIND_CLUSTER) ;; \
+	esac
+
+.PHONY: test-e2e
+test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expects an isolated environment using Kind.
+	KIND=$(KIND) KIND_CLUSTER=$(KIND_CLUSTER) go test -tags=e2e ./test/e2e/ -v -ginkgo.v
+	$(MAKE) cleanup-test-e2e
+
+.PHONY: cleanup-test-e2e
+cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests
+	@$(KIND) delete cluster --name $(KIND_CLUSTER)
+
+.PHONY: lint
+lint: golangci-lint ## Run golangci-lint linter
+	"$(GOLANGCI_LINT)" run
+
+.PHONY: lint-fix
+lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
+	"$(GOLANGCI_LINT)" run --fix
+
+.PHONY: lint-config
+lint-config: golangci-lint ## Verify golangci-lint linter configuration
+	"$(GOLANGCI_LINT)" config verify
+
+##@ Build
+
+.PHONY: build
+build: manifests generate fmt vet ## Build manager binary.
+	go build -o bin/manager cmd/main.go
+
+.PHONY: run
+run: manifests generate fmt vet ## Run a controller from your host.
+	go run ./cmd/main.go
+
+# If you wish to build the manager image targeting other platforms you can use the --platform flag.
+# (i.e. docker build --platform linux/arm64). However, you must enable Docker BuildKit for it.
+# More info: https://docs.docker.com/develop/develop-images/build_enhancements/
+.PHONY: docker-build
+docker-build: ## Build docker image with the manager.
+	$(CONTAINER_TOOL) build -t ${IMG} .
+
+.PHONY: docker-push
+docker-push: ## Push docker image with the manager.
+	$(CONTAINER_TOOL) push ${IMG}
+
+# PLATFORMS defines the target platforms for the manager image to be built for, to provide support for multiple
+# architectures. (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
+# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
+# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
+# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:tag> then the export will fail)
+# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
+PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
+.PHONY: docker-buildx
+docker-buildx: ## Build and push docker image for the manager for cross-platform support
+	# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
+	sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
+	- $(CONTAINER_TOOL) buildx create --name deploy-builder
+	$(CONTAINER_TOOL) buildx use deploy-builder
+	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
+ - $(CONTAINER_TOOL) buildx rm deploy-builder + rm Dockerfile.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && "$(KUSTOMIZE)" edit set image controller=${IMG} + "$(KUSTOMIZE)" build config/default > dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + @out="$$( "$(KUSTOMIZE)" build config/crd 2>/dev/null || true )"; \ + if [ -n "$$out" ]; then echo "$$out" | "$(KUBECTL)" apply -f -; else echo "No CRDs to install; skipping."; fi + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + @out="$$( "$(KUSTOMIZE)" build config/crd 2>/dev/null || true )"; \ + if [ -n "$$out" ]; then echo "$$out" | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f -; else echo "No CRDs to delete; skipping."; fi + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && "$(KUSTOMIZE)" edit set image controller=${IMG} + "$(KUSTOMIZE)" build config/default | "$(KUBECTL)" apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + "$(KUSTOMIZE)" build config/default | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p "$(LOCALBIN)" + +## Tool Binaries +KUBECTL ?= kubectl +KIND ?= kind +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.7.1 +CONTROLLER_TOOLS_VERSION ?= v0.19.0 + +#ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20) +ENVTEST_VERSION ?= $(shell v='$(call gomodver,sigs.k8s.io/controller-runtime)'; \ + [ -n "$$v" ] || { echo "Set ENVTEST_VERSION manually (controller-runtime replace has no tag)" >&2; exit 1; }; \ + printf '%s\n' "$$v" | sed -E 's/^v?([0-9]+)\.([0-9]+).*/release-\1.\2/') + +#ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31) +ENVTEST_K8S_VERSION ?= $(shell v='$(call gomodver,k8s.io/api)'; \ + [ -n "$$v" ] || { echo "Set ENVTEST_K8S_VERSION manually (k8s.io/api replace has no tag)" >&2; exit 1; }; \ + printf '%s\n' "$$v" | sed -E 's/^v?[0-9]+\.([0-9]+).*/1.\1/') + +GOLANGCI_LINT_VERSION ?= v2.5.0 +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: setup-envtest +setup-envtest: envtest ## Download the binaries required for ENVTEST in the local bin directory. 
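+# Illustrative usage note: this drops the kube-apiserver/etcd test binaries into
+# $(LOCALBIN); 'make test' then points KUBEBUILDER_ASSETS at them automatically.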
+ @echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..." + @"$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path || { \ + echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \ + exit 1; \ + } + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] && [ "$$(readlink -- "$(1)" 2>/dev/null)" = "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f "$(1)" ;\ +GOBIN="$(LOCALBIN)" go install $${package} ;\ +mv "$(LOCALBIN)/$$(basename "$(1)")" "$(1)-$(3)" ;\ +} ;\ +ln -sf "$$(realpath "$(1)-$(3)")" "$(1)" +endef + +define gomodver +$(shell go list -m -f '{{if .Replace}}{{.Replace.Version}}{{else}}{{.Version}}{{end}}' $(1) 2>/dev/null) +endef diff --git a/deploy/rig-operator/PROJECT b/deploy/rig-operator/PROJECT new file mode 100644 index 0000000..4e9d5df --- /dev/null +++ b/deploy/rig-operator/PROJECT @@ -0,0 +1,46 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +cliVersion: 4.10.1 +domain: appstack.io +layout: +- go.kubebuilder.io/v4 +projectName: deploy +repo: vanderlande.com/ittp/appstack/rig-operator +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: appstack.io + group: rig + kind: ClusterBlueprint + path: vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: appstack.io + group: rig + kind: InfraBlueprint + path: vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: appstack.io + group: rig + kind: HarvesterBlueprint + path: vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: appstack.io + group: rig + kind: VsphereBlueprint + path: vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1 + version: v1alpha1 +version: "3" diff --git a/deploy/rig-operator/README-DEV.md b/deploy/rig-operator/README-DEV.md new file mode 100644 index 0000000..8e6d63a --- /dev/null +++ b/deploy/rig-operator/README-DEV.md @@ -0,0 +1,141 @@ +# RIG Operator (Resource Infrastructure Gateway) + +**RIG Operator** is a Kubernetes operator designed to decouple **Infrastructure Management** (Quotas, Credentials, Providers) from **Cluster Provisioning** (Kubernetes versions, Node sizing). + +It replaces legacy monolithic provisioners with a **Strategy-Pattern Architecture**, allowing a single operator to manage hybrid fleets (Harvester, vSphere, etc.) using a unified API while strictly enforcing resource quotas. 
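+
+As a rough, illustrative sketch (the type and function names below are invented
+for this README and are not the actual API in `internal/provider`), the seam
+between the controller and a provider can be pictured like this:
+
+```go
+// Sketch only: the real interface lives in internal/provider.
+package provider
+
+import "context"
+
+// Strategy is what each backend (Harvester, vSphere, ...) would implement so
+// the reconciler stays provider-agnostic.
+type Strategy interface {
+	// BuildValues renders the provider-specific part of the Helm values.
+	BuildValues(ctx context.Context, req ClusterRequest) (map[string]any, error)
+}
+
+// ClusterRequest is a hypothetical provider-agnostic sizing request.
+type ClusterRequest struct {
+	Nodes, CPUCores, MemoryGB, DiskGB int
+}
+
+// WithinQuota mirrors the "Gatekeeper" rule described under "The Logic Flow"
+// below: (Used + Request) > Max blocks provisioning.
+func WithinQuota(used, request, max int) bool {
+	return used+request <= max
+}
+```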
+
+---
+
+## 🏗 Architecture & Design
+
+### The Problem
+Legacy controllers often mix concerns:
+* *Hardcoded Providers:* Adding vSphere requires rewriting the main loop.
+* *No Accounting:* Users can provision infinite clusters until the underlying storage fills up.
+* *Tight Coupling:* Helm values, RKE2 configs, and VM details are mashed into one huge struct.
+
+### The RIG Solution
+RIG segregates responsibilities into three distinct layers, acting as a **Gatekeeper** and **Router**.
+
+#### 1. The Data Model (Blueprints)
+* **`InfraBlueprint` (The Accountant):** Owned by Platform Admins. Defines **Quotas** (CPU/RAM/Disk) and **Credentials**, and points to the specific Provider Config. It automatically tracks usage across all child clusters.
+* **`ClusterBlueprint` (The Request):** Owned by Users. Defines **what** is needed (e.g., "3 nodes, 4 CPU, 16 GB RAM") but not **how** it is provided.
+* **`HarvesterBlueprint` / `VsphereBlueprint` (The Tech Specs):** Hold low-level details (image names, networks, VM namespaces).
+
+#### 2. The Logic Flow
+1. **Gatekeeper:** Before doing anything, the Controller checks `Infra.Quota`. If `(Used + Request) > Max`, provisioning is **blocked**.
+2. **Router:** The Controller reads `Infra.ProviderRef` and dynamically loads the correct **Strategy** (Harvester vs. vSphere).
+3. **Builder:** A generic `MasterBuilder` combines the Strategy's output with a base Helm template to generate the final values.
+
+---
+
+## 📂 Project Structure
+
+| Directory | Role | Description |
+| :--- | :--- | :--- |
+| `api/v1alpha1` | **The Contract** | Defines the CRDs (`ibp`, `cbp`, `hbp`). |
+| `internal/controller` | **The Brain** | `ClusterBlueprint` (Provisioning/Gatekeeping) & `InfraBlueprint` (Accounting). |
+| `internal/provider` | **The Interface** | Defines the `Strategy` interface that all clouds must obey. |
+| `internal/provider/harvester` | **The Implementation** | Logic specific to Harvester (identity minting, NodePool mapping). |
+| `internal/builder` | **The Assembler** | Merges Strategy output with Helm templates. Agnostic to the cloud provider. |
+| `internal/helm` | **The Tool** | Wrapper around the Helm SDK (OCI supported). |
+| `internal/templates` | **The Defaults** | Embedded YAML files containing default values (CPU/RAM, UserData). |
+
+---
+
+## 🚀 Development Workflow
+
+### Prerequisites
+* Go 1.24+
+* Helm v3 (binary)
+* Kubernetes cluster (or local Kind/Minikube)
+* `kubectl` pointing to your dev cluster
+
+### 1. Common Commands
+
+**Initial Setup (Download Dependencies):**
+```bash
+go mod tidy
+```
+
+**Update APIs (CRDs):**
+*Run this whenever you edit `api/v1alpha1/*.go`.*
+
+```bash
+make manifests generate
+```
+
+**Run Locally:**
+*Runs the controller against your current `~/.kube/config` context.*
+
+```bash
+make install run
+```
+
+### 2. Debugging (VS Code / Delve)
+
+`make run` is great for logs, but if you need to set breakpoints (e.g., to inspect the Helm values map before it is applied), use the debugger.
+
+**VS Code `launch.json` Configuration:**
+
+```json
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Debug RIG Operator",
+            "type": "go",
+            "request": "launch",
+            "mode": "auto",
+            "program": "${workspaceFolder}/cmd/main.go",
+            "args": [],
+            "env": {
+                "KUBECONFIG": "${userHome}/.kube/config"
+            }
+        }
+    ]
+}
+```
+
+1. Select **"Debug RIG Operator"** from the Run menu.
+2. Set a breakpoint in `internal/controller/clusterblueprint_controller.go` (e.g., inside the `Reconcile` loop).
+3. Apply a generic cluster manifest to trigger the breakpoint.
+
+---
+
+## 🛠 Maintenance Guide
+
+### "I need to add a new field..."
+
+| Scenario | Files to Touch | Command to Run |
+| --- | --- | --- |
+| **Add a field to the API** (e.g., `ProxyURL` to Infra) | `api/v1alpha1/infrablueprint_types.go` | `make manifests generate` |
+| **Update default CPU/RAM** | `internal/templates/harvester/values.yaml` | `make build` (recompiles the embedded file) |
+| **Change Harvester UserData logic** | `internal/provider/harvester/strategy.go` | `go test ./...` |
+| **Add a new cloud provider (e.g. AWS)** | 1. Create `api/.../awsblueprint_types.go`<br>2. Create `internal/provider/aws/strategy.go`<br>3. Update the `controller` switch case. | `make manifests generate` |
+
+### "The Quota isn't updating!"
+
+Remember that the **InfraController** is responsible for the math. It watches `ClusterBlueprint` events.
+
+1. Check the logs: `kubectl logs -l control-plane=controller-manager`
+2. Ensure your Cluster actually points to the correct Infra name (`spec.infraBlueprintRef`).
+
+---
+
+## 📊 Documentation & Diagrams
+
+Visual flows (Mermaid) are available in the `docs/` folder:
+
+* `docs/flow-diagram.svg`: High-level Request Flow.
+* `docs/controllerflow.mermaid`: Detailed Controller logic.
diff --git a/deploy/rig-operator/README.md b/deploy/rig-operator/README.md
new file mode 100644
index 0000000..75175c2
--- /dev/null
+++ b/deploy/rig-operator/README.md
@@ -0,0 +1,135 @@
+# deploy
+// TODO(user): Add simple overview of use/purpose
+
+## Description
+// TODO(user): An in-depth paragraph about your project and overview of use
+
+## Getting Started
+
+### Prerequisites
+- go version v1.24.6+
+- docker version 17.03+
+- kubectl version v1.11.3+
+- Access to a Kubernetes v1.11.3+ cluster.
+
+### To Deploy on the cluster
+**Build and push your image to the location specified by `IMG`:**
+
+```sh
+make docker-build docker-push IMG=<some-registry>/deploy:tag
+```
+
+**NOTE:** This image ought to be published in the personal registry you specified,
+and it is required to have access to pull the image from the working environment.
+Make sure you have the proper permissions on the registry if the above commands don't work.
+
+**Install the CRDs into the cluster:**
+
+```sh
+make install
+```
+
+**Deploy the Manager to the cluster with the image specified by `IMG`:**
+
+```sh
+make deploy IMG=<some-registry>/deploy:tag
+```
+
+> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
+privileges or be logged in as admin.
+
+**Create instances of your solution**
+You can apply the samples (examples) from config/samples:
+
+```sh
+kubectl apply -k config/samples/
+```
+
+> **NOTE**: Ensure that the samples have default values to test them out.
+
+### To Uninstall
+**Delete the instances (CRs) from the cluster:**
+
+```sh
+kubectl delete -k config/samples/
+```
+
+**Delete the APIs (CRDs) from the cluster:**
+
+```sh
+make uninstall
+```
+
+**Undeploy the controller from the cluster:**
+
+```sh
+make undeploy
+```
+
+## Project Distribution
+
+The following options describe how to release and provide this solution to users.
+
+### By providing a bundle with all YAML files
+
+1. Build the installer for the image built and published in the registry:
+
+```sh
+make build-installer IMG=<some-registry>/deploy:tag
+```
+
+**NOTE:** The makefile target mentioned above generates an 'install.yaml'
+file in the dist directory. This file contains all the resources built
+with Kustomize, which are necessary to install this project without its
+dependencies.
+
+2. Using the installer
+
+Users can just run 'kubectl apply -f <URL for YAML BUNDLE>' to install
+the project, i.e.:
+
+```sh
+kubectl apply -f https://raw.githubusercontent.com/<org>/deploy/<tag or branch>/dist/install.yaml
+```
+
+### By providing a Helm Chart
+
+1. Build the chart using the optional helm plugin
+
+```sh
+kubebuilder edit --plugins=helm/v2-alpha
+```
+
+2. See that a chart was generated under 'dist/chart', and users
+can obtain this solution from there.
+
+**NOTE:** If you change the project, you need to update the Helm Chart
+using the same command above to sync the latest changes.
Furthermore, +if you create webhooks, you need to use the above command with +the '--force' flag and manually ensure that any custom configuration +previously added to 'dist/chart/values.yaml' or 'dist/chart/manager/manager.yaml' +is manually re-applied afterwards. + +## Contributing +// TODO(user): Add detailed information on how you would like others to contribute to this project + +**NOTE:** Run `make help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/deploy/rig-operator/api/v1alpha1/clusterblueprint_types.go b/deploy/rig-operator/api/v1alpha1/clusterblueprint_types.go new file mode 100644 index 0000000..5aa5e59 --- /dev/null +++ b/deploy/rig-operator/api/v1alpha1/clusterblueprint_types.go @@ -0,0 +1,101 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GenericPoolReq defines a request for a set of nodes with specific sizing. +// This is provider-agnostic. +type GenericPoolReq struct { + // Name is the identifier for this node pool (e.g. "workers-gpu"). + // +required + Name string `json:"name"` + + // Quantity is the number of nodes desired. + // +required + // +kubebuilder:validation:Minimum=0 + Quantity int `json:"quantity"` + + // CpuCores is the number of vCPUs per node. + // +required + // +kubebuilder:validation:Minimum=1 + CpuCores int `json:"cpuCores"` + + // MemoryGB is the amount of RAM per node in Gigabytes. + // +required + // +kubebuilder:validation:Minimum=1 + MemoryGB int `json:"memoryGb"` + + // DiskGB is the root disk size per node in Gigabytes. + // +required + // +kubebuilder:validation:Minimum=10 + DiskGB int `json:"diskGb"` +} + +// ClusterBlueprintSpec defines the desired state of ClusterBlueprint +type ClusterBlueprintSpec struct { + // InfraBlueprintRef points to the InfraBlueprint (IBP) that manages + // the quotas and provider details for this cluster. + // +required + InfraBlueprintRef string `json:"infraBlueprintRef"` + + // KubernetesVersion is the target RKE2/K3s version (e.g., v1.28.0+rke2r1). + // +required + KubernetesVersion string `json:"kubernetesVersion"` + + // ControlPlaneHA determines if we provision 3 CP nodes (true) or 1 (false). + // +optional + ControlPlaneHA bool `json:"controlPlaneHA"` + + // WorkerPools is the list of worker node groups to provision. + // +optional + WorkerPools []GenericPoolReq `json:"workerPools,omitempty"` +} + +// IdentityStatus tracks the generated cloud provider identity +type IdentityStatus struct { + // SecretRef is the name of the generated secret used by this cluster. + SecretRef string `json:"secretRef,omitempty"` + + // ServiceAccount is the name of the SA created on the provider (if applicable). 
+ ServiceAccount string `json:"serviceAccount,omitempty"` +} + +// ClusterBlueprintStatus defines the observed state of ClusterBlueprint +type ClusterBlueprintStatus struct { + // Ready indicates if the Helm Chart has been successfully applied. + Ready bool `json:"ready"` + + // Identity tracks the cloud credentials generated for this cluster. + // +optional + Identity *IdentityStatus `json:"identity,omitempty"` + + // Phase can be "Pending", "Provisioning", "Deployed", or "Failed" + // +optional + Phase string `json:"phase,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cbp +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase" +// +kubebuilder:printcolumn:name="K8s Version",type="string",JSONPath=".spec.kubernetesVersion" +// +kubebuilder:printcolumn:name="Infra",type="string",JSONPath=".spec.infraBlueprintRef" +type ClusterBlueprint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterBlueprintSpec `json:"spec,omitempty"` + Status ClusterBlueprintStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true +type ClusterBlueprintList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterBlueprint `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterBlueprint{}, &ClusterBlueprintList{}) +} diff --git a/deploy/rig-operator/api/v1alpha1/groupversion_info.go b/deploy/rig-operator/api/v1alpha1/groupversion_info.go new file mode 100644 index 0000000..c90c93a --- /dev/null +++ b/deploy/rig-operator/api/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the rig v1alpha1 API group. +// +kubebuilder:object:generate=true +// +groupName=rig.appstack.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "rig.appstack.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/deploy/rig-operator/api/v1alpha1/harvesterblueprint_types.go b/deploy/rig-operator/api/v1alpha1/harvesterblueprint_types.go new file mode 100644 index 0000000..99e6bc1 --- /dev/null +++ b/deploy/rig-operator/api/v1alpha1/harvesterblueprint_types.go @@ -0,0 +1,59 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HarvesterBlueprintSpec defines the desired state of HarvesterBlueprint +type HarvesterBlueprintSpec struct { + // HarvesterURL is the endpoint of the Harvester cluster (e.g. https://10.x.x.x:6443). 
+ // This replaces the need for auto-discovery. + // +required + HarvesterURL string `json:"harvesterUrl"` + + // VmNamespace is the namespace in Harvester where VMs will be created. + // +required + VmNamespace string `json:"vmNamespace"` + + // ImageName is the specific image name in Harvester to clone (e.g. image-abcde). + // +required + ImageName string `json:"imageName"` + + // NetworkName is the VM Network to attach to the nodes. + // +required + NetworkName string `json:"networkName"` + + // SshUser is the username to configure on the VM (e.g. ubuntu, rancher). + // +required + SshUser string `json:"sshUser"` +} + +// HarvesterBlueprintStatus defines the observed state of HarvesterBlueprint +type HarvesterBlueprintStatus struct { + // Ready indicates the configuration is valid (optional future use) + Ready bool `json:"ready,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=hbp +type HarvesterBlueprint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HarvesterBlueprintSpec `json:"spec,omitempty"` + Status HarvesterBlueprintStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HarvesterBlueprintList contains a list of HarvesterBlueprint +type HarvesterBlueprintList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HarvesterBlueprint `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HarvesterBlueprint{}, &HarvesterBlueprintList{}) +} diff --git a/deploy/rig-operator/api/v1alpha1/infrablueprint_types.go b/deploy/rig-operator/api/v1alpha1/infrablueprint_types.go new file mode 100644 index 0000000..de4d48f --- /dev/null +++ b/deploy/rig-operator/api/v1alpha1/infrablueprint_types.go @@ -0,0 +1,112 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// InfraQuota defines the resource limits for this infrastructure account +type InfraQuota struct { + // MaxCPU is the total number of cores allowed across all clusters + // +optional + MaxCPU int `json:"maxCpu,omitempty"` + + // MaxMemoryGB is the total RAM (in GB) allowed across all clusters + // +optional + MaxMemoryGB int `json:"maxMemoryGb,omitempty"` + + // MaxDiskGB is the total Storage (in GB) allowed across all clusters + // +optional + MaxDiskGB int `json:"maxDiskGb,omitempty"` +} + +// InfraQuotaStatus tracks current usage +type InfraQuotaStatus struct { + // UsedCPU is the sum of cores currently provisioned + UsedCPU int `json:"usedCpu"` + + // UsedMemoryGB is the sum of RAM currently provisioned + UsedMemoryGB int `json:"usedMemoryGb"` + + // UsedDiskGB tracks storage consumption + UsedDiskGB int `json:"usedDiskGb"` +} + +// ProviderRef points to the specific provider configuration (HBP or VBP) +type ProviderRef struct { + // Kind is the type of resource being referenced (e.g., HarvesterBlueprint) + // +required + Kind string `json:"kind"` + + // Name is the name of resource being referenced + // +required + Name string `json:"name"` + + // APIGroup defaults to rig.appstack.io if not specified + // +optional + APIGroup string `json:"apiGroup,omitempty"` +} + +// InfraBlueprintSpec defines the desired state of InfraBlueprint +type InfraBlueprintSpec struct { + // RancherURL is the public URL of the Rancher Manager (e.g. https://rancher.example.com) + // This is injected into the Helm Chart to register the cluster. 
+ // +required + RancherURL string `json:"rancherUrl"` + + // CloudCredentialSecret is the name of the Secret containing the + // master cloud credentials (e.g., kubeconfig or username/password). + // +required + CloudCredentialSecret string `json:"cloudCredentialSecret"` + + // ProviderRef points to the technical configuration (HarvesterBlueprint/VsphereBlueprint). + // +required + ProviderRef ProviderRef `json:"providerRef"` + + // Quota defines the maximum resources allocatable by this Infra. + // +optional + Quota InfraQuota `json:"quota,omitempty"` + + // UserData is the default cloud-init user data for all clusters in this Infra. + // +optional + UserData string `json:"userData,omitempty"` +} + +// InfraBlueprintStatus defines the observed state of InfraBlueprint +type InfraBlueprintStatus struct { + // Ready indicates the provider connection is verified + Ready bool `json:"ready,omitempty"` + + // Usage tracks the current resource consumption + Usage InfraQuotaStatus `json:"usage,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=ibp +// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready" +// +kubebuilder:printcolumn:name="MaxCPU",type="integer",JSONPath=".spec.quota.maxCpu" +// +kubebuilder:printcolumn:name="UsedCPU",type="integer",JSONPath=".status.usage.usedCpu" +// +kubebuilder:printcolumn:name="MaxMem(GB)",type="integer",JSONPath=".spec.quota.maxMemoryGb" +// +kubebuilder:printcolumn:name="UsedMem(GB)",type="integer",JSONPath=".status.usage.usedMemoryGb" +// +kubebuilder:printcolumn:name="MaxDisk(GB)",type="integer",JSONPath=".spec.quota.maxDiskGb" +// +kubebuilder:printcolumn:name="UsedDisk(GB)",type="integer",JSONPath=".status.usage.usedDiskGb" +type InfraBlueprint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec InfraBlueprintSpec `json:"spec,omitempty"` + Status InfraBlueprintStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InfraBlueprintList contains a list of InfraBlueprint +type InfraBlueprintList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []InfraBlueprint `json:"items"` +} + +func init() { + SchemeBuilder.Register(&InfraBlueprint{}, &InfraBlueprintList{}) +} diff --git a/deploy/rig-operator/api/v1alpha1/vsphereblueprint_types.go b/deploy/rig-operator/api/v1alpha1/vsphereblueprint_types.go new file mode 100644 index 0000000..1b268c3 --- /dev/null +++ b/deploy/rig-operator/api/v1alpha1/vsphereblueprint_types.go @@ -0,0 +1,67 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// VsphereBlueprintSpec defines the desired state of VsphereBlueprint +type VsphereBlueprintSpec struct { + // vCenter address (e.g. vcenter.example.com) + // +required + VCenter string `json:"vCenter"` + + // Datacenter name (e.g. NL001) + // +required + Datacenter string `json:"datacenter"` + + // Folder path where VMs will be organized (e.g. "ICT Digitalisation - Rancher") + // +required + Folder string `json:"folder"` + + // ResourcePool path (e.g. "NL001 Development - Rancher/Resources") + // +required + ResourcePool string `json:"resourcePool"` + + // DatastoreCluster or Datastore name (e.g. "NL001 Development - Rancher SDRS") + // +required + Datastore string `json:"datastore"` + + // Network name to attach to (e.g. 
"nl001.vDS.Distri.Vlan.1542") + // +required + Network string `json:"network"` + + // Template is the VM template name to clone from + // +required + Template string `json:"template"` +} + +// VsphereBlueprintStatus defines the observed state +type VsphereBlueprintStatus struct { + Ready bool `json:"ready,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=vbp + +// VsphereBlueprint is the Schema for the vsphereblueprints API +type VsphereBlueprint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec VsphereBlueprintSpec `json:"spec,omitempty"` + Status VsphereBlueprintStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VsphereBlueprintList contains a list of VsphereBlueprint +type VsphereBlueprintList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VsphereBlueprint `json:"items"` +} + +func init() { + SchemeBuilder.Register(&VsphereBlueprint{}, &VsphereBlueprintList{}) +} diff --git a/deploy/rig-operator/api/v1alpha1/zz_generated.deepcopy.go b/deploy/rig-operator/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..1f79af9 --- /dev/null +++ b/deploy/rig-operator/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,469 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterBlueprint) DeepCopyInto(out *ClusterBlueprint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBlueprint. +func (in *ClusterBlueprint) DeepCopy() *ClusterBlueprint { + if in == nil { + return nil + } + out := new(ClusterBlueprint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterBlueprint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterBlueprintList) DeepCopyInto(out *ClusterBlueprintList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterBlueprint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBlueprintList. 
+func (in *ClusterBlueprintList) DeepCopy() *ClusterBlueprintList { + if in == nil { + return nil + } + out := new(ClusterBlueprintList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterBlueprintList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterBlueprintSpec) DeepCopyInto(out *ClusterBlueprintSpec) { + *out = *in + if in.WorkerPools != nil { + in, out := &in.WorkerPools, &out.WorkerPools + *out = make([]GenericPoolReq, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBlueprintSpec. +func (in *ClusterBlueprintSpec) DeepCopy() *ClusterBlueprintSpec { + if in == nil { + return nil + } + out := new(ClusterBlueprintSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterBlueprintStatus) DeepCopyInto(out *ClusterBlueprintStatus) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBlueprintStatus. +func (in *ClusterBlueprintStatus) DeepCopy() *ClusterBlueprintStatus { + if in == nil { + return nil + } + out := new(ClusterBlueprintStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericPoolReq) DeepCopyInto(out *GenericPoolReq) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericPoolReq. +func (in *GenericPoolReq) DeepCopy() *GenericPoolReq { + if in == nil { + return nil + } + out := new(GenericPoolReq) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HarvesterBlueprint) DeepCopyInto(out *HarvesterBlueprint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterBlueprint. +func (in *HarvesterBlueprint) DeepCopy() *HarvesterBlueprint { + if in == nil { + return nil + } + out := new(HarvesterBlueprint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HarvesterBlueprint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HarvesterBlueprintList) DeepCopyInto(out *HarvesterBlueprintList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HarvesterBlueprint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterBlueprintList. +func (in *HarvesterBlueprintList) DeepCopy() *HarvesterBlueprintList { + if in == nil { + return nil + } + out := new(HarvesterBlueprintList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HarvesterBlueprintList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HarvesterBlueprintSpec) DeepCopyInto(out *HarvesterBlueprintSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterBlueprintSpec. +func (in *HarvesterBlueprintSpec) DeepCopy() *HarvesterBlueprintSpec { + if in == nil { + return nil + } + out := new(HarvesterBlueprintSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HarvesterBlueprintStatus) DeepCopyInto(out *HarvesterBlueprintStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterBlueprintStatus. +func (in *HarvesterBlueprintStatus) DeepCopy() *HarvesterBlueprintStatus { + if in == nil { + return nil + } + out := new(HarvesterBlueprintStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityStatus) DeepCopyInto(out *IdentityStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityStatus. +func (in *IdentityStatus) DeepCopy() *IdentityStatus { + if in == nil { + return nil + } + out := new(IdentityStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraBlueprint) DeepCopyInto(out *InfraBlueprint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraBlueprint. +func (in *InfraBlueprint) DeepCopy() *InfraBlueprint { + if in == nil { + return nil + } + out := new(InfraBlueprint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfraBlueprint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InfraBlueprintList) DeepCopyInto(out *InfraBlueprintList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InfraBlueprint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraBlueprintList. +func (in *InfraBlueprintList) DeepCopy() *InfraBlueprintList { + if in == nil { + return nil + } + out := new(InfraBlueprintList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfraBlueprintList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraBlueprintSpec) DeepCopyInto(out *InfraBlueprintSpec) { + *out = *in + out.ProviderRef = in.ProviderRef + out.Quota = in.Quota +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraBlueprintSpec. +func (in *InfraBlueprintSpec) DeepCopy() *InfraBlueprintSpec { + if in == nil { + return nil + } + out := new(InfraBlueprintSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraBlueprintStatus) DeepCopyInto(out *InfraBlueprintStatus) { + *out = *in + out.Usage = in.Usage +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraBlueprintStatus. +func (in *InfraBlueprintStatus) DeepCopy() *InfraBlueprintStatus { + if in == nil { + return nil + } + out := new(InfraBlueprintStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraQuota) DeepCopyInto(out *InfraQuota) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraQuota. +func (in *InfraQuota) DeepCopy() *InfraQuota { + if in == nil { + return nil + } + out := new(InfraQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfraQuotaStatus) DeepCopyInto(out *InfraQuotaStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraQuotaStatus. +func (in *InfraQuotaStatus) DeepCopy() *InfraQuotaStatus { + if in == nil { + return nil + } + out := new(InfraQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderRef) DeepCopyInto(out *ProviderRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderRef. +func (in *ProviderRef) DeepCopy() *ProviderRef { + if in == nil { + return nil + } + out := new(ProviderRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VsphereBlueprint) DeepCopyInto(out *VsphereBlueprint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereBlueprint. +func (in *VsphereBlueprint) DeepCopy() *VsphereBlueprint { + if in == nil { + return nil + } + out := new(VsphereBlueprint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VsphereBlueprint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereBlueprintList) DeepCopyInto(out *VsphereBlueprintList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VsphereBlueprint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereBlueprintList. +func (in *VsphereBlueprintList) DeepCopy() *VsphereBlueprintList { + if in == nil { + return nil + } + out := new(VsphereBlueprintList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VsphereBlueprintList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereBlueprintSpec) DeepCopyInto(out *VsphereBlueprintSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereBlueprintSpec. +func (in *VsphereBlueprintSpec) DeepCopy() *VsphereBlueprintSpec { + if in == nil { + return nil + } + out := new(VsphereBlueprintSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereBlueprintStatus) DeepCopyInto(out *VsphereBlueprintStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereBlueprintStatus. +func (in *VsphereBlueprintStatus) DeepCopy() *VsphereBlueprintStatus { + if in == nil { + return nil + } + out := new(VsphereBlueprintStatus) + in.DeepCopyInto(out) + return out +} diff --git a/deploy/rig-operator/cmd/main.go b/deploy/rig-operator/cmd/main.go new file mode 100644 index 0000000..aceb7e9 --- /dev/null +++ b/deploy/rig-operator/cmd/main.go @@ -0,0 +1,213 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "crypto/tls" + "flag" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" + "vanderlande.com/ittp/appstack/rig-operator/internal/controller" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(rigv1alpha1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +// nolint:gocyclo +func main() { + var metricsAddr string + var metricsCertPath, metricsCertName, metricsCertKey string + var webhookCertPath, webhookCertName, webhookCertKey string + var enableLeaderElection bool + var probeAddr string + var secureMetrics bool + var enableHTTP2 bool + var tlsOpts []func(*tls.Config) + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") + flag.StringVar(&webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.") + flag.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.") + flag.StringVar(&webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.") + flag.StringVar(&metricsCertPath, "metrics-cert-path", "", + "The directory that contains the metrics server certificate.") + flag.StringVar(&metricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.") + flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. 
For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + // Initial webhook TLS options + webhookTLSOpts := tlsOpts + webhookServerOptions := webhook.Options{ + TLSOpts: webhookTLSOpts, + } + + if len(webhookCertPath) > 0 { + setupLog.Info("Initializing webhook certificate watcher using provided certificates", + "webhook-cert-path", webhookCertPath, "webhook-cert-name", webhookCertName, "webhook-cert-key", webhookCertKey) + + webhookServerOptions.CertDir = webhookCertPath + webhookServerOptions.CertName = webhookCertName + webhookServerOptions.KeyName = webhookCertKey + } + + webhookServer := webhook.NewServer(webhookServerOptions) + + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.4/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + TLSOpts: tlsOpts, + } + + if secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.4/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + + // If the certificate is not specified, controller-runtime will automatically + // generate self-signed certificates for the metrics server. While convenient for development and testing, + // this setup is not recommended for production. + // + // TODO(user): If you enable certManager, uncomment the following lines: + // - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates + // managed by cert-manager for the metrics server. + // - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification. + if len(metricsCertPath) > 0 { + setupLog.Info("Initializing metrics certificate watcher using provided certificates", + "metrics-cert-path", metricsCertPath, "metrics-cert-name", metricsCertName, "metrics-cert-key", metricsCertKey) + + metricsServerOptions.CertDir = metricsCertPath + metricsServerOptions.CertName = metricsCertName + metricsServerOptions.KeyName = metricsCertKey + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: metricsServerOptions, + WebhookServer: webhookServer, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "47b7cef0.appstack.io", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. 
However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err := (&controller.ClusterBlueprintReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + // [IMPORTANT] Add this line to enable event broadcasting + Recorder: mgr.GetEventRecorderFor("rig-operator"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClusterBlueprint") + os.Exit(1) + } + if err := (&controller.InfraBlueprintReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "InfraBlueprint") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/deploy/rig-operator/config/crd/bases/rig.appstack.io_clusterblueprints.yaml b/deploy/rig-operator/config/crd/bases/rig.appstack.io_clusterblueprints.yaml new file mode 100644 index 0000000..c9b0b3c --- /dev/null +++ b/deploy/rig-operator/config/crd/bases/rig.appstack.io_clusterblueprints.yaml @@ -0,0 +1,136 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: clusterblueprints.rig.appstack.io +spec: + group: rig.appstack.io + names: + kind: ClusterBlueprint + listKind: ClusterBlueprintList + plural: clusterblueprints + shortNames: + - cbp + singular: clusterblueprint + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .spec.kubernetesVersion + name: K8s Version + type: string + - jsonPath: .spec.infraBlueprintRef + name: Infra + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterBlueprintSpec defines the desired state of ClusterBlueprint + properties: + controlPlaneHA: + description: ControlPlaneHA determines if we provision 3 CP nodes + (true) or 1 (false). + type: boolean + infraBlueprintRef: + description: |- + InfraBlueprintRef points to the InfraBlueprint (IBP) that manages + the quotas and provider details for this cluster. 
+ type: string + kubernetesVersion: + description: KubernetesVersion is the target RKE2/K3s version (e.g., + v1.28.0+rke2r1). + type: string + workerPools: + description: WorkerPools is the list of worker node groups to provision. + items: + description: |- + GenericPoolReq defines a request for a set of nodes with specific sizing. + This is provider-agnostic. + properties: + cpuCores: + description: CpuCores is the number of vCPUs per node. + minimum: 1 + type: integer + diskGb: + description: DiskGB is the root disk size per node in Gigabytes. + minimum: 10 + type: integer + memoryGb: + description: MemoryGB is the amount of RAM per node in Gigabytes. + minimum: 1 + type: integer + name: + description: Name is the identifier for this node pool (e.g. + "workers-gpu"). + type: string + quantity: + description: Quantity is the number of nodes desired. + minimum: 0 + type: integer + required: + - cpuCores + - diskGb + - memoryGb + - name + - quantity + type: object + type: array + required: + - infraBlueprintRef + - kubernetesVersion + type: object + status: + description: ClusterBlueprintStatus defines the observed state of ClusterBlueprint + properties: + identity: + description: Identity tracks the cloud credentials generated for this + cluster. + properties: + secretRef: + description: SecretRef is the name of the generated secret used + by this cluster. + type: string + serviceAccount: + description: ServiceAccount is the name of the SA created on the + provider (if applicable). + type: string + type: object + phase: + description: Phase can be "Pending", "Provisioning", "Deployed", or + "Failed" + type: string + ready: + description: Ready indicates if the Helm Chart has been successfully + applied. + type: boolean + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/rig-operator/config/crd/bases/rig.appstack.io_harvesterblueprints.yaml b/deploy/rig-operator/config/crd/bases/rig.appstack.io_harvesterblueprints.yaml new file mode 100644 index 0000000..cac7ea4 --- /dev/null +++ b/deploy/rig-operator/config/crd/bases/rig.appstack.io_harvesterblueprints.yaml @@ -0,0 +1,82 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: harvesterblueprints.rig.appstack.io +spec: + group: rig.appstack.io + names: + kind: HarvesterBlueprint + listKind: HarvesterBlueprintList + plural: harvesterblueprints + shortNames: + - hbp + singular: harvesterblueprint + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HarvesterBlueprintSpec defines the desired state of HarvesterBlueprint + properties: + harvesterUrl: + description: |- + HarvesterURL is the endpoint of the Harvester cluster (e.g. https://10.x.x.x:6443). + This replaces the need for auto-discovery. + type: string + imageName: + description: ImageName is the specific image name in Harvester to + clone (e.g. image-abcde). + type: string + networkName: + description: NetworkName is the VM Network to attach to the nodes. + type: string + sshUser: + description: SshUser is the username to configure on the VM (e.g. + ubuntu, rancher). + type: string + vmNamespace: + description: VmNamespace is the namespace in Harvester where VMs will + be created. + type: string + required: + - harvesterUrl + - imageName + - networkName + - sshUser + - vmNamespace + type: object + status: + description: HarvesterBlueprintStatus defines the observed state of HarvesterBlueprint + properties: + ready: + description: Ready indicates the configuration is valid (optional + future use) + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/rig-operator/config/crd/bases/rig.appstack.io_infrablueprints.yaml b/deploy/rig-operator/config/crd/bases/rig.appstack.io_infrablueprints.yaml new file mode 100644 index 0000000..20f7d8e --- /dev/null +++ b/deploy/rig-operator/config/crd/bases/rig.appstack.io_infrablueprints.yaml @@ -0,0 +1,146 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: infrablueprints.rig.appstack.io +spec: + group: rig.appstack.io + names: + kind: InfraBlueprint + listKind: InfraBlueprintList + plural: infrablueprints + shortNames: + - ibp + singular: infrablueprint + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ready + name: Ready + type: boolean + - jsonPath: .spec.quota.maxCpu + name: MaxCPU + type: integer + - jsonPath: .status.usage.usedCpu + name: UsedCPU + type: integer + - jsonPath: .spec.quota.maxMemoryGb + name: MaxMem(GB) + type: integer + - jsonPath: .status.usage.usedMemoryGb + name: UsedMem(GB) + type: integer + - jsonPath: .spec.quota.maxDiskGb + name: MaxDisk(GB) + type: integer + - jsonPath: .status.usage.usedDiskGb + name: UsedDisk(GB) + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InfraBlueprintSpec defines the desired state of InfraBlueprint + properties: + cloudCredentialSecret: + description: |- + CloudCredentialSecret is the name of the Secret containing the + master cloud credentials (e.g., kubeconfig or username/password). + type: string + providerRef: + description: ProviderRef points to the technical configuration (HarvesterBlueprint/VsphereBlueprint). + properties: + apiGroup: + description: APIGroup defaults to rig.appstack.io if not specified + type: string + kind: + description: Kind is the type of resource being referenced (e.g., + HarvesterBlueprint) + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + quota: + description: Quota defines the maximum resources allocatable by this + Infra. + properties: + maxCpu: + description: MaxCPU is the total number of cores allowed across + all clusters + type: integer + maxDiskGb: + description: MaxDiskGB is the total Storage (in GB) allowed across + all clusters + type: integer + maxMemoryGb: + description: MaxMemoryGB is the total RAM (in GB) allowed across + all clusters + type: integer + type: object + rancherUrl: + description: |- + RancherURL is the public URL of the Rancher Manager (e.g. https://rancher.example.com) + This is injected into the Helm Chart to register the cluster. + type: string + userData: + description: UserData is the default cloud-init user data for all + clusters in this Infra. + type: string + required: + - cloudCredentialSecret + - providerRef + - rancherUrl + type: object + status: + description: InfraBlueprintStatus defines the observed state of InfraBlueprint + properties: + ready: + description: Ready indicates the provider connection is verified + type: boolean + usage: + description: Usage tracks the current resource consumption + properties: + usedCpu: + description: UsedCPU is the sum of cores currently provisioned + type: integer + usedDiskGb: + description: UsedDiskGB tracks storage consumption + type: integer + usedMemoryGb: + description: UsedMemoryGB is the sum of RAM currently provisioned + type: integer + required: + - usedCpu + - usedDiskGb + - usedMemoryGb + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/rig-operator/config/crd/bases/rig.appstack.io_vsphereblueprints.yaml b/deploy/rig-operator/config/crd/bases/rig.appstack.io_vsphereblueprints.yaml new file mode 100644 index 0000000..84cfe79 --- /dev/null +++ b/deploy/rig-operator/config/crd/bases/rig.appstack.io_vsphereblueprints.yaml @@ -0,0 +1,86 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: vsphereblueprints.rig.appstack.io +spec: + group: rig.appstack.io + names: + kind: VsphereBlueprint + listKind: VsphereBlueprintList + plural: vsphereblueprints + shortNames: + - vbp + singular: vsphereblueprint + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: VsphereBlueprint is the Schema for the vsphereblueprints API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VsphereBlueprintSpec defines the desired state of VsphereBlueprint + properties: + datacenter: + description: Datacenter name (e.g. NL001) + type: string + datastore: + description: DatastoreCluster or Datastore name (e.g. "NL001 Development + - Rancher SDRS") + type: string + folder: + description: Folder path where VMs will be organized (e.g. "ICT Digitalisation + - Rancher") + type: string + network: + description: Network name to attach to (e.g. "nl001.vDS.Distri.Vlan.1542") + type: string + resourcePool: + description: ResourcePool path (e.g. "NL001 Development - Rancher/Resources") + type: string + template: + description: Template is the VM template name to clone from + type: string + vCenter: + description: vCenter address (e.g. vcenter.example.com) + type: string + required: + - datacenter + - datastore + - folder + - network + - resourcePool + - template + - vCenter + type: object + status: + description: VsphereBlueprintStatus defines the observed state + properties: + ready: + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/rig-operator/config/crd/kustomization.yaml b/deploy/rig-operator/config/crd/kustomization.yaml new file mode 100644 index 0000000..fae5f90 --- /dev/null +++ b/deploy/rig-operator/config/crd/kustomization.yaml @@ -0,0 +1,19 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/rig.appstack.io_clusterblueprints.yaml +- bases/rig.appstack.io_infrablueprints.yaml +- bases/rig.appstack.io_harvesterblueprints.yaml +- bases/rig.appstack.io_vsphereblueprints.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+#configurations: +#- kustomizeconfig.yaml diff --git a/deploy/rig-operator/config/crd/kustomizeconfig.yaml b/deploy/rig-operator/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000..ec5c150 --- /dev/null +++ b/deploy/rig-operator/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/deploy/rig-operator/config/default/cert_metrics_manager_patch.yaml b/deploy/rig-operator/config/default/cert_metrics_manager_patch.yaml new file mode 100644 index 0000000..d975015 --- /dev/null +++ b/deploy/rig-operator/config/default/cert_metrics_manager_patch.yaml @@ -0,0 +1,30 @@ +# This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs. + +# Add the volumeMount for the metrics-server certs +- op: add + path: /spec/template/spec/containers/0/volumeMounts/- + value: + mountPath: /tmp/k8s-metrics-server/metrics-certs + name: metrics-certs + readOnly: true + +# Add the --metrics-cert-path argument for the metrics server +- op: add + path: /spec/template/spec/containers/0/args/- + value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs + +# Add the metrics-server certs volume configuration +- op: add + path: /spec/template/spec/volumes/- + value: + name: metrics-certs + secret: + secretName: metrics-server-cert + optional: false + items: + - key: ca.crt + path: ca.crt + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key diff --git a/deploy/rig-operator/config/default/kustomization.yaml b/deploy/rig-operator/config/default/kustomization.yaml new file mode 100644 index 0000000..bb35f39 --- /dev/null +++ b/deploy/rig-operator/config/default/kustomization.yaml @@ -0,0 +1,234 @@ +# Adds namespace to all resources. +namespace: deploy-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: deploy- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. +# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. +# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. 
+#- ../network-policy + +# Uncomment the patches line if you enable Metrics +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# Uncomment the patches line if you enable Metrics and CertManager +# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. +# This patch will protect the metrics with certManager self-signed certs. +#- path: cert_metrics_manager_patch.yaml +# target: +# kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- path: manager_webhook_patch.yaml +# target: +# kind: Deployment + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Uncomment the following block to enable certificates for metrics +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.name +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 0 +# create: true + +# - source: +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.namespace +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true +# - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have any webhook +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # Name of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # Namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. +# +kubebuilder:scaffold:crdkustomizecainjectionns +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 
+# +kubebuilder:scaffold:crdkustomizecainjectionname diff --git a/deploy/rig-operator/config/default/manager_metrics_patch.yaml b/deploy/rig-operator/config/default/manager_metrics_patch.yaml new file mode 100644 index 0000000..2aaef65 --- /dev/null +++ b/deploy/rig-operator/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/deploy/rig-operator/config/default/metrics_service.yaml b/deploy/rig-operator/config/default/metrics_service.yaml new file mode 100644 index 0000000..1c552fd --- /dev/null +++ b/deploy/rig-operator/config/default/metrics_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager + app.kubernetes.io/name: deploy diff --git a/deploy/rig-operator/config/manager/kustomization.yaml b/deploy/rig-operator/config/manager/kustomization.yaml new file mode 100644 index 0000000..5c5f0b8 --- /dev/null +++ b/deploy/rig-operator/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/deploy/rig-operator/config/manager/manager.yaml b/deploy/rig-operator/config/manager/manager.yaml new file mode 100644 index 0000000..56c7047 --- /dev/null +++ b/deploy/rig-operator/config/manager/manager.yaml @@ -0,0 +1,99 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: deploy + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + app.kubernetes.io/name: deploy + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + # Projects are configured by default to adhere to the "restricted" Pod Security Standards. + # This ensures that deployments meet the highest security requirements for Kubernetes. 
+ # For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 + image: controller:latest + name: manager + ports: [] + securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + volumeMounts: [] + volumes: [] + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/deploy/rig-operator/config/network-policy/allow-metrics-traffic.yaml b/deploy/rig-operator/config/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 0000000..ca4e31d --- /dev/null +++ b/deploy/rig-operator/config/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,27 @@ +# This NetworkPolicy allows ingress traffic +# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those +# namespaces are able to gather data from the metrics endpoint. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: allow-metrics-traffic + namespace: system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: deploy + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP diff --git a/deploy/rig-operator/config/network-policy/kustomization.yaml b/deploy/rig-operator/config/network-policy/kustomization.yaml new file mode 100644 index 0000000..ec0fb5e --- /dev/null +++ b/deploy/rig-operator/config/network-policy/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- allow-metrics-traffic.yaml diff --git a/deploy/rig-operator/config/prometheus/kustomization.yaml b/deploy/rig-operator/config/prometheus/kustomization.yaml new file mode 100644 index 0000000..fdc5481 --- /dev/null +++ b/deploy/rig-operator/config/prometheus/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- monitor.yaml + +# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus +# to securely reference certificates created and managed by cert-manager. +# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml +# to mount the "metrics-server-cert" secret in the Manager Deployment. 
+#patches: +# - path: monitor_tls_patch.yaml +# target: +# kind: ServiceMonitor diff --git a/deploy/rig-operator/config/prometheus/monitor.yaml b/deploy/rig-operator/config/prometheus/monitor.yaml new file mode 100644 index 0000000..16f5f6b --- /dev/null +++ b/deploy/rig-operator/config/prometheus/monitor.yaml @@ -0,0 +1,27 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification, exposing the system to potential man-in-the-middle attacks. + # For production environments, it is recommended to use cert-manager for automatic TLS certificate management. + # To apply this configuration, enable cert-manager and use the patch located at config/prometheus/servicemonitor_tls_patch.yaml, + # which securely references the certificate from the 'metrics-server-cert' secret. + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: deploy diff --git a/deploy/rig-operator/config/prometheus/monitor_tls_patch.yaml b/deploy/rig-operator/config/prometheus/monitor_tls_patch.yaml new file mode 100644 index 0000000..5bf84ce --- /dev/null +++ b/deploy/rig-operator/config/prometheus/monitor_tls_patch.yaml @@ -0,0 +1,19 @@ +# Patch for Prometheus ServiceMonitor to enable secure TLS configuration +# using certificates managed by cert-manager +- op: replace + path: /spec/endpoints/0/tlsConfig + value: + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize + serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc + insecureSkipVerify: false + ca: + secret: + name: metrics-server-cert + key: ca.crt + cert: + secret: + name: metrics-server-cert + key: tls.crt + keySecret: + name: metrics-server-cert + key: tls.key diff --git a/deploy/rig-operator/config/rbac/clusterblueprint_admin_role.yaml b/deploy/rig-operator/config/rbac/clusterblueprint_admin_role.yaml new file mode 100644 index 0000000..342a000 --- /dev/null +++ b/deploy/rig-operator/config/rbac/clusterblueprint_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over rig.appstack.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: clusterblueprint-admin-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints + verbs: + - '*' +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/clusterblueprint_editor_role.yaml b/deploy/rig-operator/config/rbac/clusterblueprint_editor_role.yaml new file mode 100644 index 0000000..cc08280 --- /dev/null +++ b/deploy/rig-operator/config/rbac/clusterblueprint_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the rig.appstack.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: clusterblueprint-editor-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/clusterblueprint_viewer_role.yaml b/deploy/rig-operator/config/rbac/clusterblueprint_viewer_role.yaml new file mode 100644 index 0000000..f9298a7 --- /dev/null +++ b/deploy/rig-operator/config/rbac/clusterblueprint_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to rig.appstack.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: clusterblueprint-viewer-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints + verbs: + - get + - list + - watch +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/harvesterblueprint_admin_role.yaml b/deploy/rig-operator/config/rbac/harvesterblueprint_admin_role.yaml new file mode 100644 index 0000000..0b951cd --- /dev/null +++ b/deploy/rig-operator/config/rbac/harvesterblueprint_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over rig.appstack.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: harvesterblueprint-admin-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - harvesterblueprints + verbs: + - '*' +- apiGroups: + - rig.appstack.io + resources: + - harvesterblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/harvesterblueprint_editor_role.yaml b/deploy/rig-operator/config/rbac/harvesterblueprint_editor_role.yaml new file mode 100644 index 0000000..f8ab0a7 --- /dev/null +++ b/deploy/rig-operator/config/rbac/harvesterblueprint_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the rig.appstack.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: harvesterblueprint-editor-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - harvesterblueprints + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rig.appstack.io + resources: + - harvesterblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/harvesterblueprint_viewer_role.yaml b/deploy/rig-operator/config/rbac/harvesterblueprint_viewer_role.yaml new file mode 100644 index 0000000..5ea0b10 --- /dev/null +++ b/deploy/rig-operator/config/rbac/harvesterblueprint_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to rig.appstack.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: harvesterblueprint-viewer-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - harvesterblueprints + verbs: + - get + - list + - watch +- apiGroups: + - rig.appstack.io + resources: + - harvesterblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/infrablueprint_admin_role.yaml b/deploy/rig-operator/config/rbac/infrablueprint_admin_role.yaml new file mode 100644 index 0000000..26e7ff0 --- /dev/null +++ b/deploy/rig-operator/config/rbac/infrablueprint_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over rig.appstack.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: infrablueprint-admin-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - infrablueprints + verbs: + - '*' +- apiGroups: + - rig.appstack.io + resources: + - infrablueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/infrablueprint_editor_role.yaml b/deploy/rig-operator/config/rbac/infrablueprint_editor_role.yaml new file mode 100644 index 0000000..2c08135 --- /dev/null +++ b/deploy/rig-operator/config/rbac/infrablueprint_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the rig.appstack.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: infrablueprint-editor-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - infrablueprints + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rig.appstack.io + resources: + - infrablueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/infrablueprint_viewer_role.yaml b/deploy/rig-operator/config/rbac/infrablueprint_viewer_role.yaml new file mode 100644 index 0000000..68d9170 --- /dev/null +++ b/deploy/rig-operator/config/rbac/infrablueprint_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to rig.appstack.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: infrablueprint-viewer-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - infrablueprints + verbs: + - get + - list + - watch +- apiGroups: + - rig.appstack.io + resources: + - infrablueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/kustomization.yaml b/deploy/rig-operator/config/rbac/kustomization.yaml new file mode 100644 index 0000000..9579950 --- /dev/null +++ b/deploy/rig-operator/config/rbac/kustomization.yaml @@ -0,0 +1,37 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. 
+# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml +# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the deploy itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. +- vsphereblueprint_admin_role.yaml +- vsphereblueprint_editor_role.yaml +- vsphereblueprint_viewer_role.yaml +- harvesterblueprint_admin_role.yaml +- harvesterblueprint_editor_role.yaml +- harvesterblueprint_viewer_role.yaml +- infrablueprint_admin_role.yaml +- infrablueprint_editor_role.yaml +- infrablueprint_viewer_role.yaml +- clusterblueprint_admin_role.yaml +- clusterblueprint_editor_role.yaml +- clusterblueprint_viewer_role.yaml + diff --git a/deploy/rig-operator/config/rbac/leader_election_role.yaml b/deploy/rig-operator/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..e0bc9f3 --- /dev/null +++ b/deploy/rig-operator/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/deploy/rig-operator/config/rbac/leader_election_role_binding.yaml b/deploy/rig-operator/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..93ad1f9 --- /dev/null +++ b/deploy/rig-operator/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/deploy/rig-operator/config/rbac/metrics_auth_role.yaml b/deploy/rig-operator/config/rbac/metrics_auth_role.yaml new file mode 100644 index 0000000..32d2e4e --- /dev/null +++ b/deploy/rig-operator/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/deploy/rig-operator/config/rbac/metrics_auth_role_binding.yaml b/deploy/rig-operator/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 0000000..e775d67 --- /dev/null +++ b/deploy/rig-operator/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git 
a/deploy/rig-operator/config/rbac/metrics_reader_role.yaml b/deploy/rig-operator/config/rbac/metrics_reader_role.yaml new file mode 100644 index 0000000..51a75db --- /dev/null +++ b/deploy/rig-operator/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/role.yaml b/deploy/rig-operator/config/rbac/role.yaml new file mode 100644 index 0000000..c130b15 --- /dev/null +++ b/deploy/rig-operator/config/rbac/role.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints + - infrablueprints + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints/finalizers + verbs: + - update +- apiGroups: + - rig.appstack.io + resources: + - clusterblueprints/status + - infrablueprints/status + verbs: + - get + - patch + - update +- apiGroups: + - rig.appstack.io + resources: + - harvesterblueprints + verbs: + - get + - list + - watch diff --git a/deploy/rig-operator/config/rbac/role_binding.yaml b/deploy/rig-operator/config/rbac/role_binding.yaml new file mode 100644 index 0000000..d3d78c7 --- /dev/null +++ b/deploy/rig-operator/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/deploy/rig-operator/config/rbac/service_account.yaml b/deploy/rig-operator/config/rbac/service_account.yaml new file mode 100644 index 0000000..9b43ffd --- /dev/null +++ b/deploy/rig-operator/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/deploy/rig-operator/config/rbac/vsphereblueprint_admin_role.yaml b/deploy/rig-operator/config/rbac/vsphereblueprint_admin_role.yaml new file mode 100644 index 0000000..f7162ad --- /dev/null +++ b/deploy/rig-operator/config/rbac/vsphereblueprint_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over rig.appstack.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: vsphereblueprint-admin-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - vsphereblueprints + verbs: + - '*' +- apiGroups: + - rig.appstack.io + resources: + - vsphereblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/vsphereblueprint_editor_role.yaml b/deploy/rig-operator/config/rbac/vsphereblueprint_editor_role.yaml new file mode 100644 index 0000000..5ab66ac --- /dev/null +++ b/deploy/rig-operator/config/rbac/vsphereblueprint_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the rig.appstack.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: vsphereblueprint-editor-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - vsphereblueprints + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rig.appstack.io + resources: + - vsphereblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/rbac/vsphereblueprint_viewer_role.yaml b/deploy/rig-operator/config/rbac/vsphereblueprint_viewer_role.yaml new file mode 100644 index 0000000..dec03a8 --- /dev/null +++ b/deploy/rig-operator/config/rbac/vsphereblueprint_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project deploy itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to rig.appstack.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: vsphereblueprint-viewer-role +rules: +- apiGroups: + - rig.appstack.io + resources: + - vsphereblueprints + verbs: + - get + - list + - watch +- apiGroups: + - rig.appstack.io + resources: + - vsphereblueprints/status + verbs: + - get diff --git a/deploy/rig-operator/config/samples/kustomization.yaml b/deploy/rig-operator/config/samples/kustomization.yaml new file mode 100644 index 0000000..9f41c13 --- /dev/null +++ b/deploy/rig-operator/config/samples/kustomization.yaml @@ -0,0 +1,7 @@ +## Append samples of your project ## +resources: +- rig_v1alpha1_clusterblueprint.yaml +- rig_v1alpha1_infrablueprint.yaml +- rig_v1alpha1_harvesterblueprint.yaml +- rig_v1alpha1_vsphereblueprint.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/deploy/rig-operator/config/samples/rig_v1alpha1_clusterblueprint.yaml b/deploy/rig-operator/config/samples/rig_v1alpha1_clusterblueprint.yaml new file mode 100644 index 0000000..f9f85b4 --- /dev/null +++ b/deploy/rig-operator/config/samples/rig_v1alpha1_clusterblueprint.yaml @@ -0,0 +1,22 @@ +apiVersion: rig.appstack.io/v1alpha1 +kind: ClusterBlueprint +metadata: + name: test-cluster-01 + namespace: fleet-default +spec: + # Points to the InfraBlueprint (which links to Harvester + Quotas) + infraBlueprintRef: "dev-environment-v1" + + # 1. Lifecycle + kubernetesVersion: "v1.33.5+rke2r1" + + # 2. Topology: Control Plane (1 Node) + controlPlaneHA: false + + # 3. Topology: Workers + workerPools: + - name: "app-workers" + quantity: 1 + cpuCores: 4 + memoryGb: 16 + diskGb: 60 \ No newline at end of file diff --git a/deploy/rig-operator/config/samples/rig_v1alpha1_harvesterblueprint.yaml b/deploy/rig-operator/config/samples/rig_v1alpha1_harvesterblueprint.yaml new file mode 100644 index 0000000..dc63beb --- /dev/null +++ b/deploy/rig-operator/config/samples/rig_v1alpha1_harvesterblueprint.yaml @@ -0,0 +1,14 @@ +apiVersion: rig.appstack.io/v1alpha1 +kind: HarvesterBlueprint +metadata: + name: dev-harvester-config + namespace: fleet-default +spec: + # [MOVED] Technical connection details live here now + harvesterUrl: "https://172.27.27.190:6443" + + # [MOVED] VM Template details + vmNamespace: "vanderlande" + imageName: "vanderlande/image-qhtpc" + networkName: "vanderlande/vm-lan" + sshUser: "rancher" \ No newline at end of file diff --git a/deploy/rig-operator/config/samples/rig_v1alpha1_infrablueprint_harvester.yaml b/deploy/rig-operator/config/samples/rig_v1alpha1_infrablueprint_harvester.yaml new file mode 100644 index 0000000..6fd5d8a --- /dev/null +++ b/deploy/rig-operator/config/samples/rig_v1alpha1_infrablueprint_harvester.yaml @@ -0,0 +1,17 @@ +apiVersion: rig.appstack.io/v1alpha1 +kind: InfraBlueprint +metadata: + name: dev-environment-v1 + namespace: fleet-default +spec: + cloudCredentialSecret: "cc-mrklm" + # [NEW] Added Rancher URL + rancherUrl: "https://rancher-mgmt.product.lan" + + providerRef: + kind: HarvesterBlueprint + name: dev-harvester-config + quota: + maxCpu: 100 + maxMemoryGb: 256 + maxDiskGb: 3000 \ No newline at end of file diff --git a/deploy/rig-operator/config/samples/rig_v1alpha1_vsphereblueprint.yaml b/deploy/rig-operator/config/samples/rig_v1alpha1_vsphereblueprint.yaml new file mode 100644 index 0000000..986fd85 --- /dev/null +++ b/deploy/rig-operator/config/samples/rig_v1alpha1_vsphereblueprint.yaml @@ -0,0 +1,9 @@ +apiVersion: rig.appstack.io/v1alpha1 
+kind: VsphereBlueprint +metadata: + labels: + app.kubernetes.io/name: deploy + app.kubernetes.io/managed-by: kustomize + name: vsphereblueprint-sample +spec: + # TODO(user): Add fields here diff --git a/deploy/rig-operator/config/samples/vsphere_stack.yaml b/deploy/rig-operator/config/samples/vsphere_stack.yaml new file mode 100644 index 0000000..2146cf7 --- /dev/null +++ b/deploy/rig-operator/config/samples/vsphere_stack.yaml @@ -0,0 +1,70 @@ +# --------------------------------------------------------- +# 1. Technical Configuration (The Location) +# --------------------------------------------------------- +apiVersion: rig.appstack.io/v1alpha1 +kind: VsphereBlueprint +metadata: + name: dev-vsphere-config + namespace: fleet-default +spec: + vCenter: "vcenter.vanderlande.com" + datacenter: "NL001" + folder: "ICT Digitalisation - Rancher" + resourcePool: "NL001 Development - Rancher/Resources" + datastore: "NL001 Development - Rancher SDRS" + network: "nl001.vDS.Distri.Vlan.1542" + template: "nl001-cp-ubuntu-22.04-amd64-20250327-5.15.0-135-rke2-k3s" + +--- +# --------------------------------------------------------- +# 2. Infra Manager (The Accountant & Identity) +# --------------------------------------------------------- +apiVersion: rig.appstack.io/v1alpha1 +kind: InfraBlueprint +metadata: + name: dev-vsphere-infra + namespace: fleet-default +spec: + # Credentials (Must exist in Rancher/Kubernetes) + cloudCredentialSecret: "cc-lhtl9" + rancherUrl: "https://rancher.tst.vanderlande.com" + + # Point to the vSphere Configuration above + providerRef: + kind: VsphereBlueprint + name: dev-vsphere-config + + # Budget Limits for this Environment + quota: + maxCpu: 50 # Total vCPUs allowed + maxMemoryGb: 128 # Total RAM allowed + maxDiskGb: 5000 # Total Disk allowed + +--- +# --------------------------------------------------------- +# 3. Cluster Request (The User Goal) +# --------------------------------------------------------- +apiVersion: rig.appstack.io/v1alpha1 +kind: ClusterBlueprint +metadata: + name: test-vsphere-cluster-01 + namespace: fleet-default +spec: + # Link to the vSphere Infra defined above + infraBlueprintRef: "dev-vsphere-infra" + + # Lifecycle + kubernetesVersion: "v1.31.12+rke2r1" + + # Topology: Control Plane (1 Node) + # Uses default sizing from values.yaml (2 CPU / 8 GB) + controlPlaneHA: false + + # Topology: Workers + # These sizes (GB) will be converted to MB automatically by your Strategy + workerPools: + - name: "app-workers" + quantity: 2 + cpuCores: 4 + memoryGb: 8 # Strategy converts to 8192 MB + diskGb: 100 # Strategy converts to 102400 MB \ No newline at end of file diff --git a/deploy/rig-operator/docs/blueprint_orchestration.svg b/deploy/rig-operator/docs/blueprint_orchestration.svg new file mode 100644 index 0000000..a4d2388 --- /dev/null +++ b/deploy/rig-operator/docs/blueprint_orchestration.svg @@ -0,0 +1,67 @@ +

+[SVG body omitted: rendered UML class diagram of the blueprint orchestration, generated from docs/uml.mermaid. Packages: API_Blueprints (ClusterBlueprint "cbp", InfraBlueprint "ibp", VsphereBlueprint "vbp", HarvesterBlueprint "hbp", AzureBlueprint "abp"), Controller_Layer (RIGController) and Generation_Layer (MasterValuesBuilder, ProviderStrategy interface, VsphereStrategy, HarvesterStrategy). Note: the providerRef is polymorphic and may point to any ProviderBlueprint Kind (vbp, hbp, or abp).]
\ No newline at end of file
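The class diagram above and the sequence diagram below both hinge on one dispatch step: RIGController reads `spec.providerRef.Kind` from the InfraBlueprint, instantiates the matching strategy, and injects it into the MasterBuilder. A minimal Go sketch of that step follows; it is illustrative only. `provider.Strategy`, `builder.NewMasterBuilder` and the `Build` signature are taken from `internal/builder/master.go` in this patch, while the `v1alpha1.InfraBlueprint` Go type, its `ProviderRef` and `CloudCredentialSecret` field names, and the `NewHarvesterStrategy`/`NewVsphereStrategy` constructors are assumptions inferred from the sample YAML and the diagrams.

```go
package controller

import (
    "context"
    "fmt"

    "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
    "vanderlande.com/ittp/appstack/rig-operator/internal/builder"
    "vanderlande.com/ittp/appstack/rig-operator/internal/provider"
)

// strategyFor maps the polymorphic providerRef.Kind of an InfraBlueprint to a
// concrete provider.Strategy. The constructor names are assumptions; only the
// Strategy interface itself appears in this patch.
func strategyFor(ibp *v1alpha1.InfraBlueprint) (provider.Strategy, error) {
    // Field layout assumed from the UML (spec.providerRef : TypedLocalObjectReference).
    switch ibp.Spec.ProviderRef.Kind {
    case "HarvesterBlueprint":
        return provider.NewHarvesterStrategy(), nil
    case "VsphereBlueprint":
        return provider.NewVsphereStrategy(), nil
    default:
        return nil, fmt.Errorf("unsupported providerRef kind %q", ibp.Spec.ProviderRef.Kind)
    }
}

// buildValues mirrors steps 3 and 4 of the sequence diagram: pick the strategy,
// inject it into the MasterBuilder, and produce the Helm values map.
func buildValues(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, baseTemplate []byte) (map[string]interface{}, error) {
    strategy, err := strategyFor(ibp)
    if err != nil {
        return nil, err
    }
    b := builder.NewMasterBuilder(strategy, baseTemplate)
    // CloudCredentialSecret is assumed from spec.cloudCredentialSecret in the sample YAML.
    return b.Build(ctx, cbp, ibp.Spec.CloudCredentialSecret)
}
```

With the switch isolated like this, adding the planned AzureBlueprint should only require a new strategy implementation plus one extra case; the MasterBuilder itself stays provider-agnostic.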
diff --git a/deploy/rig-operator/docs/controllerflow.mermaid b/deploy/rig-operator/docs/controllerflow.mermaid
new file mode 100644
index 0000000..059a0f0
--- /dev/null
+++ b/deploy/rig-operator/docs/controllerflow.mermaid
@@ -0,0 +1,29 @@
+sequenceDiagram
+    participant User
+    participant Controller
+    participant InfraBP as InfraBlueprint
+    participant ProviderBP as Harvester/Vsphere BP
+    participant Strategy
+    participant Builder as MasterBuilder
+    participant Helm
+
+    User->>Controller: Create ClusterBlueprint
+    Controller->>InfraBP: 1. Get Infra & Quota
+    InfraBP-->>Controller: ProviderRef (Kind="HarvesterBlueprint")
+
+    note over Controller: Dynamic Switching Logic
+
+    alt Kind is Harvester
+        Controller->>ProviderBP: 2. Get Harvester Config
+        Controller->>Strategy: 3. Init HarvesterStrategy
+    else Kind is Vsphere
+        Controller->>ProviderBP: 2. Get Vsphere Config
+        Controller->>Strategy: 3. Init VsphereStrategy
+    end
+
+    Controller->>Builder: 4. Build(Strategy)
+    Builder->>Strategy: GenerateNodePools()
+    Strategy-->>Builder: [Pool A, Pool B]
+    Builder-->>Controller: map[values]
+
+    Controller->>Helm: 5. Apply(values)
\ No newline at end of file
diff --git a/deploy/rig-operator/docs/flow-diagram.svg b/deploy/rig-operator/docs/flow-diagram.svg
new file mode 100644
index 0000000..9653009
--- /dev/null
+++ b/deploy/rig-operator/docs/flow-diagram.svg
@@ -0,0 +1,67 @@
+[SVG body omitted: rendered image of the sequence diagram above; the source is docs/controllerflow.mermaid.]
\ No newline at end of file
diff --git a/deploy/rig-operator/docs/uml.mermaid b/deploy/rig-operator/docs/uml.mermaid
new file mode 100644
index 0000000..fb7a202
--- /dev/null
+++ b/deploy/rig-operator/docs/uml.mermaid
@@ -0,0 +1,121 @@
+classDiagram
+    direction TB
+
+    %% ==========================================
+    %% PACKAGE: K8s API Definitions (Blueprints)
+    %% ==========================================
+    namespace API_Blueprints {
+        class ClusterBlueprint {
+            <<Kind: ClusterBlueprint, Short: cbp>>
+            Description: Generic cluster request
+            ---
+            +spec.infraBlueprintRef : string
+            +spec.kubernetesVersion : string
+            +spec.workerPools : List~GenericPoolReq~
+        }
+
+        class InfraBlueprint {
+            <<Kind: InfraBlueprint, Short: ibp>>
+            Description: Manages quotas and provider ref
+            ---
+            +spec.quotaLimits : ResourceList
+            +status.quotaUsed : ResourceList
+            +spec.providerRef : TypedLocalObjectReference
+        }
+
+        class VsphereBlueprint {
+            <<Kind: VsphereBlueprint, Short: vbp>>
+            Description: Concrete vSphere details
+            ---
+            +spec.vcenterURL : string
+            +spec.datacenterID : string
+            +spec.networkIDs : List~string~
+        }
+
+        class HarvesterBlueprint {
+            <<Kind: HarvesterBlueprint, Short: hbp>>
+            Description: Concrete Harvester details
+            ---
+            +spec.harvesterURL : string
+            +spec.vmNamespace : string
+            +spec.imageName : string
+        }
+
+        class AzureBlueprint {
+            <<Kind: AzureBlueprint, Short: abp>>
+            Description: Future Azure details
+        }
+    }
+
+    %% Relationships between Blueprints
+    ClusterBlueprint --> InfraBlueprint : 1. References by Name
+    note for InfraBlueprint "The providerRef is polymorphic.\nIt points to ANY ProviderBlueprint Kind\n(vbp, hbp, or abp)."
+    InfraBlueprint ..> VsphereBlueprint : 2. Dynamically references Kind=vbp
+    InfraBlueprint ..> HarvesterBlueprint : 2. Dynamically references Kind=hbp
+    InfraBlueprint ..> AzureBlueprint : 2. Dynamically references Kind=abp
+
+
+    %% ==========================================
+    %% PACKAGE: Controller (Orchestration)
+    %% ==========================================
+    namespace Controller_Layer {
+        class RIGController {
+            Description: The brain. Watches CBPs, checks IBP quotas.
+            ---
+            +Reconcile(request)
+        }
+    }
+
+    RIGController "watches" --> ClusterBlueprint
+    RIGController "reads & checks quota" --> InfraBlueprint
+
+
+    %% ==========================================
+    %% PACKAGE: Builders & Strategies (Generation)
+    %% ==========================================
+    namespace Generation_Layer {
+        class MasterValuesBuilder {
+            Description: Knows generic Helm structure.
+            ---
+            -strategy : ProviderStrategy
+            +BuildHelmValues(cbp, ibp) Map
+        }
+
+        class ProviderStrategy {
+            <<Interface>>
+            Description: Contract for isolated provider logic.
+            ---
+            +GenerateNodePools(genericPools, providerBP) List~Any~
+            +GetGlobalOverrides(providerBP) Map
+            +PerformPreFlight(ctx, providerBP) Error
+        }
+
+        class VsphereStrategy {
+            Description: Specialist VBP to Helm translation.
+            ---
+            +GenerateNodePools(...)
+        }
+
+        class HarvesterStrategy {
+            Description: Specialist HBP to Helm translation.
+            ---
+            +GenerateNodePools(...)
+            +PerformPreFlight(...)
+        }
+    }
+
+    %% Controller orchestrates builders
+    note for RIGController "1. Reads IBP.providerRef.Kind\n2. Instantiates correct Strategy (e.g. VsphereStrategy)\n3. Injects Strategy into MasterBuilder\n4. Calls MasterBuilder.Build()"
+
+    RIGController "configures & calls" --> MasterValuesBuilder
+
+    %% Master Builder uses the interface
+    MasterValuesBuilder o--> ProviderStrategy : Injected Dependency
+
+    %% Realization of strategies
+    ProviderStrategy <|.. VsphereStrategy : Implements
+    ProviderStrategy <|.. 
HarvesterStrategy : Implements + + %% Strategies read their specific blueprints + VsphereStrategy ..> VsphereBlueprint : Reads config to map data + HarvesterStrategy ..> HarvesterBlueprint : Reads config to map data \ No newline at end of file diff --git a/deploy/rig-operator/go.mod b/deploy/rig-operator/go.mod new file mode 100644 index 0000000..098c729 --- /dev/null +++ b/deploy/rig-operator/go.mod @@ -0,0 +1,161 @@ +module vanderlande.com/ittp/appstack/rig-operator + +go 1.25.0 + +require ( + github.com/onsi/ginkgo/v2 v2.27.2 + github.com/onsi/gomega v1.38.2 + gopkg.in/yaml.v3 v3.0.1 + helm.sh/helm/v3 v3.19.4 + k8s.io/api v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/cli-runtime v0.35.0 + k8s.io/client-go v0.35.0 + sigs.k8s.io/controller-runtime v0.22.4 +) + +require ( + cel.dev/expr v0.24.0 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/BurntSushi/toml v1.5.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/containerd/containerd v1.7.29 // indirect + github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cyphar/filepath-securejoin v0.6.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.26.0 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + 
github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rubenv/sql-migrate v1.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.18.0 // 
indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.38.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/grpc v1.72.1 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/apiextensions-apiserver v0.34.2 // indirect + k8s.io/apiserver v0.34.2 // indirect + k8s.io/component-base v0.34.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/kubectl v0.34.2 // indirect + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect + oras.land/oras-go/v2 v2.6.0 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/deploy/rig-operator/go.sum b/deploy/rig-operator/go.sum new file mode 100644 index 0000000..bba3359 --- /dev/null +++ b/deploy/rig-operator/go.sum @@ -0,0 +1,501 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod 
h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= +github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod 
h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= +github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= +github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= +github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= +github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= +github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.26.0 
h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= +github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 
+github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= +github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= +github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 
h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= +github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/sergi/go-diff v1.2.0 
h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark 
v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= 
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= +go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= +go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= +go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +helm.sh/helm/v3 v3.19.4 h1:E2yFBejmZBczWr5LblhjZbvAOAwVumfBO1AtN3nqI30= +helm.sh/helm/v3 v3.19.4/go.mod h1:PC1rk7PqacpkV4acUFMLStOOis7QM9Jq3DveHBInu4s= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo= +k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE= +k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI= +k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE= +k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/component-base v0.34.2 
h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ= +k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kubectl v0.34.2 h1:+fWGrVlDONMUmmQLDaGkQ9i91oszjjRAa94cr37hzqA= +k8s.io/kubectl v0.34.2/go.mod h1:X2KTOdtZZNrTWmUD4oHApJ836pevSl+zvC5sI6oO2YQ= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/deploy/rig-operator/hack/boilerplate.go.txt b/deploy/rig-operator/hack/boilerplate.go.txt new file mode 100644 index 0000000..9786798 --- /dev/null +++ b/deploy/rig-operator/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ \ No newline at end of file diff --git a/deploy/rig-operator/internal/builder/master.go b/deploy/rig-operator/internal/builder/master.go new file mode 100644 index 0000000..fbb2e2d --- /dev/null +++ b/deploy/rig-operator/internal/builder/master.go @@ -0,0 +1,134 @@ +package builder + +import ( + "context" + "encoding/json" + "fmt" + + "gopkg.in/yaml.v3" + + "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" + "vanderlande.com/ittp/appstack/rig-operator/internal/provider" +) + +// ChartConfig holds the helm settings extracted from the YAML _defaults +// The Controller needs this to know WHICH chart to fetch. +type ChartConfig struct { + Repo string + Name string + Version string +} + +type MasterBuilder struct { + strategy provider.Strategy + baseTemplate []byte + chartConfig ChartConfig +} + +func NewMasterBuilder(strategy provider.Strategy, baseTemplate []byte) *MasterBuilder { + b := &MasterBuilder{ + strategy: strategy, + baseTemplate: baseTemplate, + // Safe defaults + chartConfig: ChartConfig{ + Name: "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates", + }, + } + return b +} + +// GetChartConfig returns the chart details found in the template. +func (b *MasterBuilder) GetChartConfig() ChartConfig { + return b.chartConfig +} + +// Build orchestrates the values generation process +func (b *MasterBuilder) Build(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecret string) (map[string]interface{}, error) { + values := make(map[string]interface{}) + if err := yaml.Unmarshal(b.baseTemplate, &values); err != nil { + return nil, fmt.Errorf("failed to unmarshal base template: %w", err) + } + + // 1. Extract Chart Config from _defaults (Legacy Logic Ported) + // We do this so the Controller knows what version to install. + if defaults, ok := values["_defaults"].(map[string]interface{}); ok { + if chartCfg, ok := defaults["helmChart"].(map[string]interface{}); ok { + if v, ok := chartCfg["repo"].(string); ok { + b.chartConfig.Repo = v + } + if v, ok := chartCfg["name"].(string); ok { + b.chartConfig.Name = v + } + if v, ok := chartCfg["version"].(string); ok { + b.chartConfig.Version = v + } + } + } + + // 2. Generate Node Pools (Delegated to Strategy) + // [DIFFERENCE]: We don't loop here. The Strategy knows how to map CBP -> Provider NodePools. + nodePools, err := b.strategy.GenerateNodePools(ctx, cbp) + if err != nil { + return nil, fmt.Errorf("strategy failed to generate node pools: %w", err) + } + + // 3. Get Global Overrides (Delegated to Strategy) + // [DIFFERENCE]: We don't hardcode "cloud_provider_name" here. The Strategy returns it. + overrides, err := b.strategy.GetGlobalOverrides(ctx, cbp, credentialSecret) + if err != nil { + return nil, fmt.Errorf("strategy failed to get global overrides: %w", err) + } + + // 4. Inject Logic into the Helm Structure + if clusterMap, ok := values["cluster"].(map[string]interface{}); ok { + clusterMap["name"] = cbp.Name + + if configMap, ok := clusterMap["config"].(map[string]interface{}); ok { + configMap["kubernetesVersion"] = cbp.Spec.KubernetesVersion + + // Ensure globalConfig exists + if _, ok := configMap["globalConfig"]; !ok { + configMap["globalConfig"] = make(map[string]interface{}) + } + globalConfig := configMap["globalConfig"].(map[string]interface{}) + + // Inject Overrides + for k, v := range overrides { + // A. Handle specific Global Config keys + if k == "cloud_provider_name" || k == "cloud_provider_config" { + globalConfig[k] = v + continue + } + + // B. 
Handle Chart Values (CCM/CSI Addons) + if k == "chartValues" { + if existingChartVals, ok := configMap["chartValues"].(map[string]interface{}); ok { + if newChartVals, ok := v.(map[string]interface{}); ok { + for ck, cv := range newChartVals { + existingChartVals[ck] = cv + } + } + } else { + configMap["chartValues"] = v + } + continue + } + + // C. Default: Inject at Root level + values[k] = v + } + } + } + + // 5. Inject Node Pools + // We marshal/unmarshal to ensure JSON tags from the Strategy structs are respected + tempJSON, _ := json.Marshal(nodePools) + var cleanNodePools interface{} + _ = json.Unmarshal(tempJSON, &cleanNodePools) + values["nodepools"] = cleanNodePools + + // 6. Cleanup internal keys + delete(values, "_defaults") + + return values, nil +} diff --git a/deploy/rig-operator/internal/controller/clusterblueprint_controller.go b/deploy/rig-operator/internal/controller/clusterblueprint_controller.go new file mode 100644 index 0000000..d507c64 --- /dev/null +++ b/deploy/rig-operator/internal/controller/clusterblueprint_controller.go @@ -0,0 +1,291 @@ +package controller + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" + "vanderlande.com/ittp/appstack/rig-operator/internal/builder" + "vanderlande.com/ittp/appstack/rig-operator/internal/helm" + "vanderlande.com/ittp/appstack/rig-operator/internal/provider" + "vanderlande.com/ittp/appstack/rig-operator/internal/provider/harvester" + harvesterTemplate "vanderlande.com/ittp/appstack/rig-operator/internal/templates/harvester" + + "vanderlande.com/ittp/appstack/rig-operator/internal/provider/vsphere" + vsphereTemplate "vanderlande.com/ittp/appstack/rig-operator/internal/templates/vsphere" +) + +const ( + rigFinalizer = "rig.appstack.io/finalizer" +) + +// ClusterBlueprintReconciler reconciles a ClusterBlueprint object +type ClusterBlueprintReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder +} + +// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints/finalizers,verbs=update +// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints,verbs=get;list;watch +// +kubebuilder:rbac:groups=rig.appstack.io,resources=harvesterblueprints,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete + +func (r *ClusterBlueprintReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := log.FromContext(ctx) + + // 1. Fetch ClusterBlueprint (CBP) + cbp := &rigv1.ClusterBlueprint{} + if err := r.Get(ctx, req.NamespacedName, cbp); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // 2. Handle Deletion ... (Same as before) + if !cbp.ObjectMeta.DeletionTimestamp.IsZero() { + return r.handleDelete(ctx, cbp) + } + + // 3. Ensure Finalizer ... 
(Same as before) + if !controllerutil.ContainsFinalizer(cbp, rigFinalizer) { + controllerutil.AddFinalizer(cbp, rigFinalizer) + if err := r.Update(ctx, cbp); err != nil { + return ctrl.Result{}, err + } + } + + // 4. Fetch InfraBlueprint (IBP) + ibp := &rigv1.InfraBlueprint{} + if err := r.Get(ctx, types.NamespacedName{Name: cbp.Spec.InfraBlueprintRef, Namespace: cbp.Namespace}, ibp); err != nil { + l.Error(err, "InfraBlueprint not found", "Infra", cbp.Spec.InfraBlueprintRef) + r.updateStatus(ctx, cbp, "PendingInfra", false) + return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil + } + + // ===================================================================== + // 4.5. QUOTA CHECK (The Gatekeeper) + // Only check quota if we are NOT already deployed. + // (Existing clusters keep running even if quota shrinks later) + // ===================================================================== + if cbp.Status.Phase != "Deployed" { + if err := r.checkQuota(cbp, ibp); err != nil { + l.Error(err, "Quota Exceeded") + // We stop here! Helm Apply will NOT run. + r.updateStatus(ctx, cbp, "QuotaExceeded", false) + // Requeue slowly to check if resources freed up later + return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil + } + } + + // 5. Select Strategy based on Infra ProviderRef + var selectedStrategy provider.Strategy + var baseTemplate []byte + var credentialSecret string + + switch ibp.Spec.ProviderRef.Kind { + case "HarvesterBlueprint": + // A. Fetch the specific Harvester Config (HBP) + hbp := &rigv1.HarvesterBlueprint{} + hbpName := types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace} + if err := r.Get(ctx, hbpName, hbp); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to load HarvesterBlueprint: %w", err) + } + + // B. Ensure Identity (Mint ServiceAccount/Secret) + idMgr := harvester.NewIdentityManager(r.Client, r.Scheme) + secretName, err := idMgr.Ensure(ctx, cbp, ibp, hbp) + if err != nil { + l.Error(err, "Failed to ensure identity") + r.updateStatus(ctx, cbp, "ProvisioningFailed", false) + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + credentialSecret = secretName + + // C. Load Defaults & Init Strategy + defaults, err := harvesterTemplate.GetDefaults() + if err != nil { + return ctrl.Result{}, err + } + baseTemplate = harvesterTemplate.GetBaseValues() + // [UPDATED] Pass ibp.Spec.RancherURL to the factory + selectedStrategy = harvester.NewStrategy( + hbp, + ibp.Spec.UserData, + ibp.Spec.RancherURL, // <--- Passing the URL here + defaults, + ) + + case "VsphereBlueprint": + // A. Fetch the specific vSphere Config (VBP) + vbp := &rigv1.VsphereBlueprint{} + vbpName := types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace} + if err := r.Get(ctx, vbpName, vbp); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to load VsphereBlueprint: %w", err) + } + + // B. Load Defaults (CPU/RAM sizing safety nets) + defaults, err := vsphereTemplate.GetDefaults() + if err != nil { + return ctrl.Result{}, err + } + baseTemplate = vsphereTemplate.GetBaseValues() + + // C. Init Strategy + // Note: vSphere typically uses the global 'cloudCredentialSecret' defined in InfraBlueprint + // rather than minting dynamic tokens per cluster like Harvester does. 
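	/*
	   Editor's note — a minimal sketch (not part of this change set) of how
	   this provider switch is meant to grow. A hypothetical third case would
	   mirror the two above: fetch the provider blueprint, load its template
	   defaults, pick a credential source, and construct its Strategy. All
	   "cloudstack*" identifiers below are illustrative names only:

	       case "CloudstackBlueprint":
	           csbp := &rigv1.CloudstackBlueprint{}
	           key := types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace}
	           if err := r.Get(ctx, key, csbp); err != nil {
	               return ctrl.Result{}, fmt.Errorf("failed to load CloudstackBlueprint: %w", err)
	           }
	           defaults, err := cloudstackTemplate.GetDefaults()
	           if err != nil {
	               return ctrl.Result{}, err
	           }
	           baseTemplate = cloudstackTemplate.GetBaseValues()
	           credentialSecret = ibp.Spec.CloudCredentialSecret // static credential, as in the vSphere case
	           selectedStrategy = cloudstack.NewStrategy(csbp, ibp.Spec.UserData, ibp.Spec.RancherURL, defaults)
	*/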
+ credentialSecret = ibp.Spec.CloudCredentialSecret + + selectedStrategy = vsphere.NewStrategy( + vbp, + ibp.Spec.UserData, + ibp.Spec.RancherURL, + defaults, + ) + + default: + return ctrl.Result{}, fmt.Errorf("unsupported provider kind: %s", ibp.Spec.ProviderRef.Kind) + } + + // 6. Build Helm Values (Generic Engine) + masterBuilder := builder.NewMasterBuilder(selectedStrategy, baseTemplate) + + values, err := masterBuilder.Build(ctx, cbp, credentialSecret) + if err != nil { + l.Error(err, "Failed to build helm values") + r.updateStatus(ctx, cbp, "ConfigGenerationFailed", false) + return ctrl.Result{}, nil // Fatal error, don't retry until config changes + } + + // 7. Apply Helm Chart + // We use the ChartConfig extracted by the MasterBuilder (from the YAML defaults) + chartCfg := masterBuilder.GetChartConfig() + + helmConfig := helm.Config{ + Namespace: cbp.Namespace, + ReleaseName: cbp.Name, // We use the Cluster name as the Release name + RepoURL: chartCfg.Repo, + ChartName: chartCfg.Name, + Version: chartCfg.Version, + Values: values, + } + + l.Info("Applying Helm Release", "Release", cbp.Name, "Chart", chartCfg.Name) + if err := helm.Apply(helmConfig); err != nil { + l.Error(err, "Helm Install/Upgrade failed") + r.updateStatus(ctx, cbp, "HelmApplyFailed", false) + return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil + } + + // 8. Success! + r.updateStatus(ctx, cbp, "Deployed", true) + return ctrl.Result{RequeueAfter: 10 * time.Minute}, nil // Re-sync periodically +} + +func (r *ClusterBlueprintReconciler) handleDelete(ctx context.Context, cbp *rigv1.ClusterBlueprint) (ctrl.Result, error) { + if controllerutil.ContainsFinalizer(cbp, rigFinalizer) { + // 1. Uninstall Helm Release + helmCfg := helm.Config{ + Namespace: cbp.Namespace, + ReleaseName: cbp.Name, + } + // Best effort uninstall + if err := helm.Uninstall(helmCfg); err != nil { + log.FromContext(ctx).Error(err, "Failed to uninstall helm release during cleanup") + } + + // 2. Cleanup Identity (Harvester SA) + // We need to look up IBP -> HBP again to know WHERE to clean up + // This is a simplified lookup; in production we might need to handle missing IBP gracefully + ibp := &rigv1.InfraBlueprint{} + if err := r.Get(ctx, types.NamespacedName{Name: cbp.Spec.InfraBlueprintRef, Namespace: cbp.Namespace}, ibp); err == nil { + if ibp.Spec.ProviderRef.Kind == "HarvesterBlueprint" { + hbp := &rigv1.HarvesterBlueprint{} + if err := r.Get(ctx, types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace}, hbp); err == nil { + idMgr := harvester.NewIdentityManager(r.Client, r.Scheme) + idMgr.Cleanup(ctx, cbp, ibp, hbp) + } + } + } + + // 3. Remove Finalizer + controllerutil.RemoveFinalizer(cbp, rigFinalizer) + if err := r.Update(ctx, cbp); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} + +func (r *ClusterBlueprintReconciler) updateStatus(ctx context.Context, cbp *rigv1.ClusterBlueprint, phase string, ready bool) { + cbp.Status.Phase = phase + cbp.Status.Ready = ready + if err := r.Status().Update(ctx, cbp); err != nil { + log.FromContext(ctx).Error(err, "Failed to update status") + } +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ClusterBlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&rigv1.ClusterBlueprint{}). 
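		/*
		   Editor's note — an illustrative variant, not part of this change.
		   Because the per-cluster credential Secret is created with an owner
		   reference back to the ClusterBlueprint (see the identity manager),
		   the builder could also watch those Secrets so that drift triggers a
		   reconcile; Owns is the standard controller-runtime hook for that
		   (assumes a corev1 import, which this file does not yet have):

		       return ctrl.NewControllerManagedBy(mgr).
		           For(&rigv1.ClusterBlueprint{}).
		           Owns(&corev1.Secret{}).
		           Complete(r)
		*/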
+ Complete(r) +} + +// Helper function to calculate required resources vs available +func (r *ClusterBlueprintReconciler) checkQuota(cbp *rigv1.ClusterBlueprint, ibp *rigv1.InfraBlueprint) error { + // 1. Calculate what this cluster needs + var reqCpu, reqMem, reqDisk int + + // Control Plane Sizing (Using safe defaults or template logic) + // Ideally, this should match the defaults in your template/strategy + cpCount := 1 + if cbp.Spec.ControlPlaneHA { + cpCount = 3 + } + reqCpu += cpCount * 4 + reqMem += cpCount * 8 + reqDisk += cpCount * 40 + + // Worker Pools Sizing + for _, pool := range cbp.Spec.WorkerPools { + reqCpu += pool.Quantity * pool.CpuCores + reqMem += pool.Quantity * pool.MemoryGB + reqDisk += pool.Quantity * pool.DiskGB + } + + // 2. Check against Limits + // Note: We use the Status.Usage which is calculated by the InfraController. + // This includes "other" clusters, but might include "this" cluster if it was already counted. + // For strict "Admission Control", usually we check: + // (CurrentUsage + Request) > MaxLimit + + // However, since InfraController runs asynchronously, 'Status.Usage' might NOT yet include this new cluster. + // So (Usage + Request) > Max is the safest check for a new provisioning. + + q := ibp.Spec.Quota + u := ibp.Status.Usage + + if q.MaxCPU > 0 && (u.UsedCPU+reqCpu) > q.MaxCPU { + return fmt.Errorf("requested CPU %d exceeds remaining quota (Max: %d, Used: %d)", reqCpu, q.MaxCPU, u.UsedCPU) + } + + if q.MaxMemoryGB > 0 && (u.UsedMemoryGB+reqMem) > q.MaxMemoryGB { + return fmt.Errorf("requested Mem %dGB exceeds remaining quota (Max: %d, Used: %d)", reqMem, q.MaxMemoryGB, u.UsedMemoryGB) + } + + if q.MaxDiskGB > 0 && (u.UsedDiskGB+reqDisk) > q.MaxDiskGB { + return fmt.Errorf("requested Disk %dGB exceeds remaining quota (Max: %d, Used: %d)", reqDisk, q.MaxDiskGB, u.UsedDiskGB) + } + + return nil +} diff --git a/deploy/rig-operator/internal/controller/clusterblueprint_controller_test.go b/deploy/rig-operator/internal/controller/clusterblueprint_controller_test.go new file mode 100644 index 0000000..beb6c1d --- /dev/null +++ b/deploy/rig-operator/internal/controller/clusterblueprint_controller_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" +) + +var _ = Describe("ClusterBlueprint Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + clusterblueprint := &rigv1alpha1.ClusterBlueprint{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ClusterBlueprint") + err := k8sClient.Get(ctx, typeNamespacedName, clusterblueprint) + if err != nil && errors.IsNotFound(err) { + resource := &rigv1alpha1.ClusterBlueprint{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &rigv1alpha1.ClusterBlueprint{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ClusterBlueprint") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &ClusterBlueprintReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/deploy/rig-operator/internal/controller/infrablueprint_controller.go b/deploy/rig-operator/internal/controller/infrablueprint_controller.go new file mode 100644 index 0000000..54ee68f --- /dev/null +++ b/deploy/rig-operator/internal/controller/infrablueprint_controller.go @@ -0,0 +1,128 @@ +package controller + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" +) + +// InfraBlueprintReconciler reconciles a InfraBlueprint object +type InfraBlueprintReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints,verbs=get;list;watch + +func (r *InfraBlueprintReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := log.FromContext(ctx) + + // 1. Fetch the InfraBlueprint + infra := &rigv1.InfraBlueprint{} + if err := r.Get(ctx, req.NamespacedName, infra); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // 2. 
List ALL ClusterBlueprints in the same namespace + // (We assume Infra and Clusters live in the same namespace for security/tenancy) + var clusterList rigv1.ClusterBlueprintList + if err := r.List(ctx, &clusterList, client.InNamespace(req.Namespace)); err != nil { + l.Error(err, "Failed to list clusters for quota calculation") + return ctrl.Result{}, err + } + + // 3. Calculate Usage (The Accountant Logic) + var usedCpu, usedMem, usedDisk int + + for _, cluster := range clusterList.Items { + // Only count clusters that belong to THIS Infra + if cluster.Spec.InfraBlueprintRef != infra.Name { + continue + } + + // Sum Control Plane + if cluster.Spec.ControlPlaneHA { + // Hardcoded fallback or we could duplicate the defaults logic here. + // Ideally, we'd read the templates, but for accounting, safe estimates are usually okay. + // Or better: The Cluster status could report its own "ResourcesConsumed". + // For now, we use the standard defaults we know: + usedCpu += 3 * 4 // 3 nodes * 4 cores + usedMem += 3 * 8 // 3 nodes * 8 GB + usedDisk += 3 * 40 // 3 nodes * 40 GB + } else { + usedCpu += 1 * 4 + usedMem += 1 * 8 + usedDisk += 1 * 40 + } + + // Sum Worker Pools + for _, pool := range cluster.Spec.WorkerPools { + usedCpu += pool.Quantity * pool.CpuCores + usedMem += pool.Quantity * pool.MemoryGB + usedDisk += pool.Quantity * pool.DiskGB + } + } + + // 4. Update Status if changed + if infra.Status.Usage.UsedCPU != usedCpu || + infra.Status.Usage.UsedMemoryGB != usedMem || + infra.Status.Usage.UsedDiskGB != usedDisk { + + infra.Status.Usage.UsedCPU = usedCpu + infra.Status.Usage.UsedMemoryGB = usedMem + infra.Status.Usage.UsedDiskGB = usedDisk + + l.Info("Updating Infra Quota Usage", "Infra", infra.Name, "CPU", usedCpu, "Mem", usedMem) + if err := r.Status().Update(ctx, infra); err != nil { + return ctrl.Result{}, err + } + } + + // 5. Verify Connectivity (Optional) + // We could check if the ProviderRef exists here and set Ready=true + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *InfraBlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&rigv1.InfraBlueprint{}). + // Watch ClusterBlueprints too! + // If a Cluster is added/modified, we need to Reconcile the Infra it points to. + Watches( + &rigv1.ClusterBlueprint{}, + handler.EnqueueRequestsFromMapFunc(r.findInfraForCluster), + ). + Complete(r) +} + +// findInfraForCluster maps a Cluster change event to a Reconcile request for its parent Infra +func (r *InfraBlueprintReconciler) findInfraForCluster(ctx context.Context, obj client.Object) []reconcile.Request { + cluster, ok := obj.(*rigv1.ClusterBlueprint) + if !ok { + return nil + } + + if cluster.Spec.InfraBlueprintRef != "" { + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: cluster.Spec.InfraBlueprintRef, + Namespace: cluster.Namespace, + }, + }, + } + } + return nil +} diff --git a/deploy/rig-operator/internal/controller/infrablueprint_controller_test.go b/deploy/rig-operator/internal/controller/infrablueprint_controller_test.go new file mode 100644 index 0000000..3401958 --- /dev/null +++ b/deploy/rig-operator/internal/controller/infrablueprint_controller_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" +) + +var _ = Describe("InfraBlueprint Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + infrablueprint := &rigv1alpha1.InfraBlueprint{} + + BeforeEach(func() { + By("creating the custom resource for the Kind InfraBlueprint") + err := k8sClient.Get(ctx, typeNamespacedName, infrablueprint) + if err != nil && errors.IsNotFound(err) { + resource := &rigv1alpha1.InfraBlueprint{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &rigv1alpha1.InfraBlueprint{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance InfraBlueprint") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &InfraBlueprintReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/deploy/rig-operator/internal/controller/suite_test.go b/deploy/rig-operator/internal/controller/suite_test.go new file mode 100644 index 0000000..250ba0d --- /dev/null +++ b/deploy/rig-operator/internal/controller/suite_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "os" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client +) + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + var err error + err = rigv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + // Retrieve the first found binary directory to allow running tests from IDEs + if getFirstFoundEnvTestBinaryDir() != "" { + testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + } + + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. +// ENVTEST-based tests depend on specific binaries, usually located in paths set by +// controller-runtime. When running tests directly (e.g., via an IDE) without using +// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. +// +// This function streamlines the process by finding the required binaries, similar to +// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are +// properly set up, run 'make setup-envtest' beforehand. 
+func getFirstFoundEnvTestBinaryDir() string { + basePath := filepath.Join("..", "..", "bin", "k8s") + entries, err := os.ReadDir(basePath) + if err != nil { + logf.Log.Error(err, "Failed to read directory", "path", basePath) + return "" + } + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(basePath, entry.Name()) + } + } + return "" +} diff --git a/deploy/rig-operator/internal/helm/client.go b/deploy/rig-operator/internal/helm/client.go new file mode 100644 index 0000000..d2df63e --- /dev/null +++ b/deploy/rig-operator/internal/helm/client.go @@ -0,0 +1,126 @@ +package helm + +import ( + "fmt" + "log" + "os" + + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/registry" // [NEW] Required for OCI + "helm.sh/helm/v3/pkg/storage/driver" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +type Config struct { + Namespace string + ReleaseName string + RepoURL string + ChartName string + Version string + Values map[string]interface{} +} + +func Apply(cfg Config) error { + settings := cli.New() + + // 1. Initialize Action Config + actionConfig := new(action.Configuration) + getter := genericclioptions.NewConfigFlags(false) + + if err := actionConfig.Init(getter, cfg.Namespace, os.Getenv("HELM_DRIVER"), log.Printf); err != nil { + return fmt.Errorf("failed to init helm config: %w", err) + } + + // 2. [NEW] Initialize OCI Registry Client + // This tells Helm how to talk to ghcr.io, docker.io, etc. + registryClient, err := registry.NewClient( + registry.ClientOptDebug(true), + registry.ClientOptEnableCache(true), + registry.ClientOptCredentialsFile(settings.RegistryConfig), // Uses ~/.config/helm/registry/config.json + ) + if err != nil { + return fmt.Errorf("failed to init registry client: %w", err) + } + actionConfig.RegistryClient = registryClient + + // 3. Setup Install Action + client := action.NewInstall(actionConfig) + client.Version = cfg.Version + client.Namespace = cfg.Namespace + client.ReleaseName = cfg.ReleaseName + client.CreateNamespace = true + + if cfg.RepoURL != "" { + client.RepoURL = cfg.RepoURL + } + + // 4. Locate Chart (Now supports oci:// because RegistryClient is set) + cp, err := client.ChartPathOptions.LocateChart(cfg.ChartName, settings) + if err != nil { + return fmt.Errorf("failed to locate chart %s: %w", cfg.ChartName, err) + } + + chart, err := loader.Load(cp) + if err != nil { + return fmt.Errorf("failed to load chart: %w", err) + } + + // 5. Install or Upgrade + histClient := action.NewHistory(actionConfig) + histClient.Max = 1 + + if _, err := histClient.Run(cfg.ReleaseName); err == driver.ErrReleaseNotFound { + fmt.Printf("Installing OCI Release %s...\n", cfg.ReleaseName) + _, err := client.Run(chart, cfg.Values) + return err + } else if err != nil { + return err + } + + fmt.Printf("Upgrading OCI Release %s...\n", cfg.ReleaseName) + upgrade := action.NewUpgrade(actionConfig) + upgrade.Version = cfg.Version + upgrade.Namespace = cfg.Namespace + // Important: Upgrade also needs the RegistryClient, but it shares 'actionConfig' + // so it is already set up. + if cfg.RepoURL != "" { + upgrade.RepoURL = cfg.RepoURL + } + _, err = upgrade.Run(cfg.ReleaseName, chart, cfg.Values) + return err +} + +func Uninstall(cfg Config) error { + settings := cli.New() + + // 1. 
Initialize Action Config (Same as Apply) + actionConfig := new(action.Configuration) + getter := genericclioptions.NewConfigFlags(false) + if err := actionConfig.Init(getter, cfg.Namespace, os.Getenv("HELM_DRIVER"), log.Printf); err != nil { + return fmt.Errorf("failed to init helm config: %w", err) + } + + // 2. Initialize OCI Registry Client (Crucial for OCI charts) + registryClient, err := registry.NewClient( + registry.ClientOptDebug(true), + registry.ClientOptEnableCache(true), + registry.ClientOptCredentialsFile(settings.RegistryConfig), + ) + if err != nil { + return fmt.Errorf("failed to init registry client: %w", err) + } + actionConfig.RegistryClient = registryClient + + // 3. Run Uninstall + client := action.NewUninstall(actionConfig) + // Don't fail if it's already gone + _, err = client.Run(cfg.ReleaseName) + if err != nil && err != driver.ErrReleaseNotFound { + return fmt.Errorf("failed to uninstall release: %w", err) + } + + fmt.Printf("✅ Uninstalled Release %s\n", cfg.ReleaseName) + return nil +} diff --git a/deploy/rig-operator/internal/provider/harvester/credential.go b/deploy/rig-operator/internal/provider/harvester/credential.go new file mode 100644 index 0000000..43e084d --- /dev/null +++ b/deploy/rig-operator/internal/provider/harvester/credential.go @@ -0,0 +1,176 @@ +package harvester + +import ( + "context" + "encoding/base64" + "fmt" + "time" + + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +// DeleteCredentialResources connects to Harvester and removes the specific SA and bindings +func DeleteCredentialResources(ctx context.Context, masterKubeconfig []byte, serviceAccountName, vmNamespace string) error { + restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig) + if err != nil { + return err + } + hvClient, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return err + } + + deletePolicy := metav1.DeletePropagationBackground + deleteOpts := metav1.DeleteOptions{PropagationPolicy: &deletePolicy} + + // 1. Delete Global CSI Binding (ClusterRoleBinding) + csiBindingName := fmt.Sprintf("%s-csi-binding", serviceAccountName) + // We ignore NotFound errors to make this idempotent + if err := hvClient.RbacV1().ClusterRoleBindings().Delete(ctx, csiBindingName, deleteOpts); err != nil && !apierrors.IsNotFound(err) { + return err + } + + // 2. Delete Cloud Provider Binding (RoleBinding in VM Namespace) + cpBindingName := fmt.Sprintf("%s-cloud-binding", serviceAccountName) + if err := hvClient.RbacV1().RoleBindings(vmNamespace).Delete(ctx, cpBindingName, deleteOpts); err != nil && !apierrors.IsNotFound(err) { + return err + } + + // 3. 
Delete ServiceAccount (VM Namespace) + if err := hvClient.CoreV1().ServiceAccounts(vmNamespace).Delete(ctx, serviceAccountName, deleteOpts); err != nil && !apierrors.IsNotFound(err) { + return err + } + + return nil +} + +// EnsureCredential mints a dedicated ServiceAccount in the specific VM Namespace +func EnsureCredential(ctx context.Context, masterKubeconfig []byte, clusterName, targetNamespace, vmNamespace, harvesterURL string) (*corev1.Secret, string, time.Time, error) { + + // --- PHASE 1: Connect --- + restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig) + if err != nil { + return nil, "", time.Time{}, fmt.Errorf("invalid rancher cloud credential kubeconfig: %w", err) + } + hvClient, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, "", time.Time{}, err + } + + // --- PHASE 2: Create Identity --- + if vmNamespace == "" { + vmNamespace = "default" + } + saName := fmt.Sprintf("prov-%s", clusterName) + + // A. Create ServiceAccount + sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: vmNamespace}} + if _, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + return nil, "", time.Time{}, err + } + } + + // B. Create RoleBinding (Cloud Provider) + rb := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: saName + "-cloud-binding", Namespace: vmNamespace}, + Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}}, + RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:cloudprovider", APIGroup: "rbac.authorization.k8s.io"}, + } + if _, err := hvClient.RbacV1().RoleBindings(vmNamespace).Create(ctx, rb, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { /* Ignore */ + } + } + + // C. Create ClusterRoleBinding (CSI Driver) + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: saName + "-csi-binding"}, + Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}}, + RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:csi-driver", APIGroup: "rbac.authorization.k8s.io"}, + } + if _, err := hvClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { /* Ignore */ + } + } + + // D. 
Mint Token + ttlSeconds := int64(315360000) // ~10 years + tokenRequest, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).CreateToken(ctx, saName, &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ExpirationSeconds: &ttlSeconds}, + }, metav1.CreateOptions{}) + if err != nil { + return nil, "", time.Time{}, fmt.Errorf("failed to mint harvester token: %w", err) + } + expiryTime := time.Now().Add(time.Duration(ttlSeconds) * time.Second) + + // --- PHASE 3: Determine URL & CA --- + if harvesterURL == "" { + harvesterURL = restConfig.Host + } + + // Fetch internal CA (required because proxy CA != internal CA) + harvesterCA := restConfig.CAData + caConfigMap, err := hvClient.CoreV1().ConfigMaps("default").Get(ctx, "kube-root-ca.crt", metav1.GetOptions{}) + if err == nil { + if caStr, ok := caConfigMap.Data["ca.crt"]; ok { + harvesterCA = []byte(caStr) + } + } + + // --- PHASE 4: Construct Kubeconfig --- + caData := base64.StdEncoding.EncodeToString(harvesterCA) + token := tokenRequest.Status.Token + + newKubeconfig := fmt.Sprintf( + `apiVersion: v1 +kind: Config +clusters: +- name: harvester + cluster: + server: %s + certificate-authority-data: %s +users: +- name: provisioner + user: + token: %s +contexts: +- name: default + context: + cluster: harvester + user: provisioner + namespace: %s +current-context: default +`, harvesterURL, caData, token, vmNamespace) + + // --- PHASE 5: Create Secret Object --- + secretName := fmt.Sprintf("harvesterconfig-%s", clusterName) + + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: targetNamespace, + Annotations: map[string]string{ + // [CRITICAL] These annotations authorize the guest cluster to use this secret + "v2prov-secret-authorized-for-cluster": clusterName, + "v2prov-authorized-secret-deletes-on-cluster-removal": "true", + }, + Labels: map[string]string{ + "cattle.io/creator": "rig-operator", // Updated creator + "rig.appstack.io/cluster": clusterName, + }, + }, + Type: "Opaque", + StringData: map[string]string{ + "credential": newKubeconfig, + }, + } + + return secret, saName, expiryTime, nil +} diff --git a/deploy/rig-operator/internal/provider/harvester/manager.go b/deploy/rig-operator/internal/provider/harvester/manager.go new file mode 100644 index 0000000..c16c712 --- /dev/null +++ b/deploy/rig-operator/internal/provider/harvester/manager.go @@ -0,0 +1,126 @@ +package harvester + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" +) + +type IdentityManager struct { + client client.Client + scheme *runtime.Scheme +} + +func NewIdentityManager(c client.Client, s *runtime.Scheme) *IdentityManager { + return &IdentityManager{client: c, scheme: s} +} + +// Ensure checks if an identity exists. If not, it fetches master creds, mints a new one, and updates Status. +func (m *IdentityManager) Ensure(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, hbp *v1alpha1.HarvesterBlueprint) (string, error) { + l := log.FromContext(ctx) + + // 1. 
Fast Path: If identity already exists in Status, return it + if cbp.Status.Identity != nil && cbp.Status.Identity.SecretRef != "" { + return cbp.Status.Identity.SecretRef, nil + } + + l.Info("Minting Harvester identity", "Cluster", cbp.Name) + + // 2. Fetch Master Credential (from Infra) + rancherCredName := ibp.Spec.CloudCredentialSecret + if rancherCredName == "" { + return "", fmt.Errorf("CloudCredentialSecret is missing in InfraBlueprint %s", ibp.Name) + } + + var rancherSecret corev1.Secret + // Note: Rancher secrets are expected in cattle-global-data + if err := m.client.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil { + return "", fmt.Errorf("failed to fetch rancher credential %s: %w", rancherCredName, err) + } + + // 3. Extract Kubeconfig + const kubeconfigKey = "harvestercredentialConfig-kubeconfigContent" + adminKubeconfigBytes := rancherSecret.Data[kubeconfigKey] + if len(adminKubeconfigBytes) == 0 { + if len(rancherSecret.Data["credential"]) > 0 { + adminKubeconfigBytes = rancherSecret.Data["credential"] + } else { + return "", fmt.Errorf("secret %s missing kubeconfig data", rancherCredName) + } + } + + // 4. Call Factory (low-level) + newSecret, saName, _, err := EnsureCredential( + ctx, + adminKubeconfigBytes, + cbp.Name, + cbp.Namespace, // Target Namespace (where secret goes) + hbp.Spec.VmNamespace, // Harvester Namespace (where VM goes) + hbp.Spec.HarvesterURL, // Explicit URL from HBP + ) + if err != nil { + return "", fmt.Errorf("failed to mint harvester credential: %w", err) + } + + // 5. Persist Secret + // Set OwnerRef so if CBP is deleted, Secret is deleted automatically + if err := controllerutil.SetControllerReference(cbp, newSecret, m.scheme); err != nil { + return "", err + } + + patchOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("rig-operator")} + if err := m.client.Patch(ctx, newSecret, client.Apply, patchOpts...); err != nil { + return "", fmt.Errorf("failed to patch new secret: %w", err) + } + + // 6. 
Update CBP Status + // We do this here so the identity is "locked" to the object immediately + if cbp.Status.Identity == nil { + cbp.Status.Identity = &v1alpha1.IdentityStatus{} + } + cbp.Status.Identity.SecretRef = newSecret.Name + cbp.Status.Identity.ServiceAccount = saName + + if err := m.client.Status().Update(ctx, cbp); err != nil { + return "", fmt.Errorf("failed to update cluster status: %w", err) + } + + return newSecret.Name, nil +} + +// Cleanup removes the ServiceAccount from Harvester when the Cluster is deleted +func (m *IdentityManager) Cleanup(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, hbp *v1alpha1.HarvesterBlueprint) { + if cbp.Status.Identity == nil || cbp.Status.Identity.ServiceAccount == "" { + return + } + + // Fetch Master Secret again to get connection details + rancherCredName := ibp.Spec.CloudCredentialSecret + var rancherSecret corev1.Secret + if err := m.client.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil { + log.FromContext(ctx).V(1).Info("Cleanup: Could not fetch master secret (connection lost), skipping manual cleanup") + return + } + + var kubeBytes []byte + if len(rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]) > 0 { + kubeBytes = rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"] + } else if len(rancherSecret.Data["credential"]) > 0 { + kubeBytes = rancherSecret.Data["credential"] + } else { + return + } + + // Delegate to low-level cleanup + if err := DeleteCredentialResources(ctx, kubeBytes, cbp.Status.Identity.ServiceAccount, hbp.Spec.VmNamespace); err != nil { + log.FromContext(ctx).Error(err, "Failed to cleanup Harvester resources (best effort)") + } +} diff --git a/deploy/rig-operator/internal/provider/harvester/strategy.go b/deploy/rig-operator/internal/provider/harvester/strategy.go new file mode 100644 index 0000000..3003612 --- /dev/null +++ b/deploy/rig-operator/internal/provider/harvester/strategy.go @@ -0,0 +1,140 @@ +package harvester + +import ( + "context" + "fmt" + + "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" + template "vanderlande.com/ittp/appstack/rig-operator/internal/templates/harvester" +) + +// harvesterNodePool matches the exact JSON structure required by the Helm Chart +type harvesterNodePool struct { + Name string `json:"name"` + DisplayName string `json:"displayName"` + Quantity int `json:"quantity"` + Etcd bool `json:"etcd"` + ControlPlane bool `json:"controlplane"` + Worker bool `json:"worker"` + Paused bool `json:"paused"` + + // Harvester Specific Fields + CpuCount int `json:"cpuCount"` + MemorySize int `json:"memorySize"` // GB + DiskSize int `json:"diskSize"` // GB + ImageName string `json:"imageName"` + NetworkName string `json:"networkName"` + SshUser string `json:"sshUser"` + VmNamespace string `json:"vmNamespace"` + UserData string `json:"userData"` +} + +type Strategy struct { + blueprint *v1alpha1.HarvesterBlueprint + userData string + rancherURL string + defaults template.Defaults +} + +// NewStrategy initializes the strategy with defaults and optional overrides +func NewStrategy(hbp *v1alpha1.HarvesterBlueprint, infraUserData string, infraRancherURL string, defaults template.Defaults) *Strategy { // 1. 
Determine UserData priority: Infra (IBP) > Template Default + finalUserData := infraUserData + if finalUserData == "" { + finalUserData = defaults.UserData + } + + return &Strategy{ + blueprint: hbp, + userData: finalUserData, + rancherURL: infraRancherURL, + defaults: defaults, + } +} + +// GenerateNodePools implements provider.Strategy +func (s *Strategy) GenerateNodePools(ctx context.Context, cbp *v1alpha1.ClusterBlueprint) (interface{}, error) { + var pools []interface{} + + // Helper to map generic req -> harvester specific struct + mapPool := func(name string, qty, cpu, memGB, diskGB int, isEtcd, isCp, isWk bool) harvesterNodePool { + return harvesterNodePool{ + Name: name, + DisplayName: name, + Quantity: qty, + Etcd: isEtcd, + ControlPlane: isCp, + Worker: isWk, + Paused: false, + + // Mapping: Generic (GB) -> Harvester (GB) [No conversion needed] + CpuCount: cpu, + MemorySize: memGB, + DiskSize: diskGB, + + // Harvester Specifics from HBP + ImageName: s.blueprint.Spec.ImageName, + NetworkName: s.blueprint.Spec.NetworkName, + SshUser: s.blueprint.Spec.SshUser, + VmNamespace: s.blueprint.Spec.VmNamespace, + UserData: s.userData, + } + } + + // 1. Control Plane Pool + cpQty := 1 + if cbp.Spec.ControlPlaneHA { + cpQty = 3 + } + + // Use Defaults from YAML for CP sizing + pools = append(pools, mapPool( + "cp-pool", + cpQty, + s.defaults.CP_CPU, + s.defaults.CP_Mem, + s.defaults.CP_Disk, + true, true, false, + )) + + // 2. Worker Pools + for _, wp := range cbp.Spec.WorkerPools { + pools = append(pools, mapPool( + wp.Name, + wp.Quantity, + wp.CpuCores, + wp.MemoryGB, + wp.DiskGB, + false, false, true, + )) + } + + return pools, nil +} + +// GetGlobalOverrides implements provider.Strategy +func (s *Strategy) GetGlobalOverrides(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecretName string) (map[string]interface{}, error) { + // secret://: + secretURI := fmt.Sprintf("secret://%s:%s", cbp.Namespace, credentialSecretName) + + overrides := map[string]interface{}{ + "cloud_provider_name": "harvester", + "cloud_provider_config": secretURI, + // Inject Rancher URL + "rancher": map[string]interface{}{ + "cattle": map[string]interface{}{ + "url": s.rancherURL, + }, + }, + + "chartValues": map[string]interface{}{ + "harvester-cloud-provider": map[string]interface{}{ + "global": map[string]interface{}{ + "cattle": map[string]interface{}{ + "clusterName": cbp.Name, + }, + }, + }, + }, + } + return overrides, nil +} diff --git a/deploy/rig-operator/internal/provider/interface.go b/deploy/rig-operator/internal/provider/interface.go new file mode 100644 index 0000000..b4a0d4b --- /dev/null +++ b/deploy/rig-operator/internal/provider/interface.go @@ -0,0 +1,16 @@ +package provider + +import ( + "context" + + "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" +) + +type Strategy interface { + // GenerateNodePools generates the provider-specific node pool list. + // [CHANGED] Return type is now interface{} to support both Structs and Maps + GenerateNodePools(ctx context.Context, cbp *v1alpha1.ClusterBlueprint) (interface{}, error) + + // GetGlobalOverrides returns the provider-specific helm values. 
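	/*
	   Editor's note — a minimal conformance sketch, not part of this change.
	   Any new provider only has to satisfy these two methods; a no-op test
	   double (illustrative names) plus the usual compile-time check would
	   look like:

	       type nullStrategy struct{}

	       func (nullStrategy) GenerateNodePools(ctx context.Context, cbp *v1alpha1.ClusterBlueprint) (interface{}, error) {
	           return []map[string]interface{}{}, nil // no pools
	       }

	       func (nullStrategy) GetGlobalOverrides(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecret string) (map[string]interface{}, error) {
	           return map[string]interface{}{}, nil // no overrides
	       }

	       var _ Strategy = nullStrategy{} // compile-time conformance check
	*/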
+ GetGlobalOverrides(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecret string) (map[string]interface{}, error) +} diff --git a/deploy/rig-operator/internal/provider/vsphere/strategy.go b/deploy/rig-operator/internal/provider/vsphere/strategy.go new file mode 100644 index 0000000..b685871 --- /dev/null +++ b/deploy/rig-operator/internal/provider/vsphere/strategy.go @@ -0,0 +1,138 @@ +package vsphere + +import ( + "context" + + rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1" + vspheretpl "vanderlande.com/ittp/appstack/rig-operator/internal/templates/vsphere" +) + +type Strategy struct { + blueprint *rigv1.VsphereBlueprint + userData string + rancherURL string + defaults vspheretpl.Defaults +} + +// NewStrategy creates the vSphere logic handler +func NewStrategy(vbp *rigv1.VsphereBlueprint, userData string, rancherURL string, defaults vspheretpl.Defaults) *Strategy { + // 1. Resolve UserData (Infra > Template Default) + finalUserData := userData + if finalUserData == "" { + finalUserData = defaults.UserData + } + + return &Strategy{ + blueprint: vbp, + userData: finalUserData, + rancherURL: rancherURL, + defaults: defaults, + } +} + +// GenerateNodePools maps the generic ClusterBlueprint to vSphere-specific NodePool maps +func (s *Strategy) GenerateNodePools(ctx context.Context, cbp *rigv1.ClusterBlueprint) (interface{}, error) { + var nodePools []map[string]interface{} + + // 1. Control Plane Node Pool + // We rely on the defaults extracted from values.yaml (e.g. 4 Core, 8GB) + // vSphere Chart expects MB, so we multiply GB * 1024. + cpQty := 1 + if cbp.Spec.ControlPlaneHA { + cpQty = 3 + } + + nodePools = append(nodePools, s.buildPool( + "control-plane-nodes", // Name + "cp-nodes", // Display Name + cpQty, // Quantity + s.defaults.CP_CPU, // Cores + s.defaults.CP_Mem*1024, // RAM (GB -> MB) + s.defaults.CP_Disk*1024, // Disk (GB -> MB) + true, // Etcd + true, // ControlPlane + false, // Worker + )) + + // 2. 
Worker Pools + // We iterate over the user's requested pools in the CBP + for _, wp := range cbp.Spec.WorkerPools { + nodePools = append(nodePools, s.buildPool( + wp.Name, + wp.Name, + wp.Quantity, + wp.CpuCores, + wp.MemoryGB*1024, // Convert GB to MB + wp.DiskGB*1024, // Convert GB to MB + false, // Etcd + false, // ControlPlane + true, // Worker + )) + } + + return nodePools, nil +} + +// GetGlobalOverrides injects the vSphere-specific global values (Cloud Provider, Credentials, URLs) +func (s *Strategy) GetGlobalOverrides(ctx context.Context, cbp *rigv1.ClusterBlueprint, credentialSecret string) (map[string]interface{}, error) { + overrides := map[string]interface{}{ + // Tell Helm we are on vSphere + "cloudprovider": "vsphere", + + // The Secret containing username/password/vcenter-address + "cloudCredentialSecretName": credentialSecret, + + // Register with the correct Rancher Manager + "rancher": map[string]interface{}{ + "cattle": map[string]interface{}{ + "url": s.rancherURL, + }, + }, + + // Cluster Metadata + "cluster": map[string]interface{}{ + "name": cbp.Name, + "config": map[string]interface{}{ + "kubernetesVersion": cbp.Spec.KubernetesVersion, + }, + }, + } + + return overrides, nil +} + +// buildPool is a private helper to construct the exact map structure the vSphere Helm Chart expects +func (s *Strategy) buildPool(name, displayName string, qty, cpu, ramMB, diskMB int, etcd, cp, worker bool) map[string]interface{} { + pool := map[string]interface{}{ + // Generic RKE2 Node Settings + "name": name, + "displayName": displayName, + "quantity": qty, + "etcd": etcd, + "controlplane": cp, + "worker": worker, + "paused": false, + + // vSphere Infrastructure Location (From Blueprint) + "vcenter": s.blueprint.Spec.VCenter, + "datacenter": s.blueprint.Spec.Datacenter, + "folder": s.blueprint.Spec.Folder, + "pool": s.blueprint.Spec.ResourcePool, + "datastoreCluster": s.blueprint.Spec.Datastore, // Assumes chart supports this key. If not, use "datastore". 
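		/*
		   Editor's note, worked example: unlike the Harvester pool struct,
		   which stays in GB end to end, the vSphere chart sizes RAM and disk
		   in MB, hence the *1024 conversions in GenerateNodePools above. A
		   worker pool requesting 4 cores / 8 GB RAM / 40 GB disk therefore
		   arrives here as cpu=4, ramMB=8192, diskMB=40960.
		*/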
+ "network": []string{s.blueprint.Spec.Network}, + + // Cloning Details + "creationType": "template", + "cloneFrom": s.blueprint.Spec.Template, + + // Hardware Sizing (Already converted to MB) + "cpuCount": cpu, + "memorySize": ramMB, + "diskSize": diskMB, + + // Cloud Init + "cloudConfig": s.userData, + } + + return pool +} diff --git a/deploy/rig-operator/internal/templates/harvester/embed.go b/deploy/rig-operator/internal/templates/harvester/embed.go new file mode 100644 index 0000000..123f277 --- /dev/null +++ b/deploy/rig-operator/internal/templates/harvester/embed.go @@ -0,0 +1,69 @@ +package harvester + +import ( + _ "embed" + "fmt" + + "gopkg.in/yaml.v3" +) + +//go:embed values.yaml +var valuesYAML []byte + +type Defaults struct { + CP_CPU int + CP_Mem int + CP_Disk int + + ChartRepo string + ChartName string + ChartVersion string + + // [NEW] Default UserData for this provider + UserData string +} + +func GetDefaults() (Defaults, error) { + var raw map[string]interface{} + if err := yaml.Unmarshal(valuesYAML, &raw); err != nil { + return Defaults{}, fmt.Errorf("failed to parse harvester base values: %w", err) + } + + d := Defaults{ + CP_CPU: 4, CP_Mem: 8, CP_Disk: 40, // Safety Fallbacks + } + + if defs, ok := raw["_defaults"].(map[string]interface{}); ok { + if cp, ok := defs["controlPlaneProfile"].(map[string]interface{}); ok { + if v, ok := cp["cpuCores"].(int); ok { + d.CP_CPU = v + } + if v, ok := cp["memoryGb"].(int); ok { + d.CP_Mem = v + } + if v, ok := cp["diskGb"].(int); ok { + d.CP_Disk = v + } + } + if chart, ok := defs["helmChart"].(map[string]interface{}); ok { + if v, ok := chart["repo"].(string); ok { + d.ChartRepo = v + } + if v, ok := chart["name"].(string); ok { + d.ChartName = v + } + if v, ok := chart["version"].(string); ok { + d.ChartVersion = v + } + } + // [NEW] Extract UserData + if v, ok := defs["userData"].(string); ok { + d.UserData = v + } + } + return d, nil +} + +func GetBaseValues() []byte { + return valuesYAML +} diff --git a/deploy/rig-operator/internal/templates/harvester/values.yaml b/deploy/rig-operator/internal/templates/harvester/values.yaml new file mode 100644 index 0000000..cabfb23 --- /dev/null +++ b/deploy/rig-operator/internal/templates/harvester/values.yaml @@ -0,0 +1,456 @@ +# ---------------------------------------------------------------- +# BASE TEMPLATE (internal/templates/base_values.yaml) +# ---------------------------------------------------------------- + +_defaults: + helmChart: + repo: "" + name: "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates" + version: "0.7.2" + controlPlaneProfile: + cpuCores: 4 + memoryGb: 8 + diskGb: 40 + userData: &userData | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - qemu-guest-agent + - yq + - jq + - curl + - wget + + bootcmd: + - sysctl -w net.ipv6.conf.all.disable_ipv6=1 + - sysctl -w net.ipv6.conf.default.disable_ipv6=1 + + write_files: + # ---------------------------------------------------------------- + # 1. 
CNI Permission Fix Script & Cron (CIS 1.1.9 Persistence) + # ---------------------------------------------------------------- + - path: /usr/local/bin/fix-cni-perms.sh + permissions: '0700' + owner: root:root + content: | + #!/bin/bash + # Wait 60s on boot for RKE2 to write files + [ "$1" == "boot" ] && sleep 60 + + # Enforce 600 on CNI files (CIS 1.1.9) + if [ -d /etc/cni/net.d ]; then + find /etc/cni/net.d -type f -exec chmod 600 {} \; + fi + if [ -d /var/lib/cni/networks ]; then + find /var/lib/cni/networks -type f -exec chmod 600 {} \; + fi + + # Every RKE2 service restart can reset CNI file permissions, so we run + # this script on reboot and daily via cron to maintain CIS compliance. + + - path: /etc/cron.d/cis-cni-fix + permissions: '0644' + owner: root:root + content: | + # Run on Reboot (with delay) to fix files created during startup + @reboot root /usr/local/bin/fix-cni-perms.sh boot + # Run once daily at 00:00 to correct any drift + 0 0 * * * root /usr/local/bin/fix-cni-perms.sh + + # ---------------------------------------------------------------- + # 2. RKE2 Admission Config + # ---------------------------------------------------------------- + - path: /etc/rancher/rke2/rke2-admission.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1beta1 + kind: PodSecurityConfiguration + defaults: + enforce: "restricted" + enforce-version: "latest" + audit: "restricted" + audit-version: "latest" + warn: "restricted" + warn-version: "latest" + exemptions: + usernames: [] + runtimeClasses: [] + namespaces: [compliance-operator-system,kube-system, cis-operator-system, tigera-operator, calico-system, rke2-ingress-nginx, cattle-system, cattle-fleet-system, longhorn-system, cattle-neuvector-system] + - name: EventRateLimit + configuration: + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - type: Server + qps: 5000 + burst: 20000 + + # ---------------------------------------------------------------- + # 3. RKE2 Audit Policy + # ---------------------------------------------------------------- + - path: /etc/rancher/rke2/audit-policy.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: None + users: ["system:kube-controller-manager", "system:kube-scheduler", "system:serviceaccount:kube-system:endpoint-controller"] + verbs: ["get", "update"] + resources: + - group: "" + resources: ["endpoints", "services", "services/status"] + - level: None + verbs: ["get"] + resources: + - group: "" + resources: ["nodes", "nodes/status", "pods", "pods/status"] + - level: None + users: ["kube-proxy"] + verbs: ["watch"] + resources: + - group: "" + resources: ["endpoints", "services", "services/status", "configmaps"] + - level: Metadata + resources: + - group: "" + resources: ["secrets", "configmaps"] + - level: RequestResponse + omitStages: + - RequestReceived + + # ---------------------------------------------------------------- + # 4. 
Static NetworkPolicies + # ---------------------------------------------------------------- + - path: /var/lib/rancher/rke2/server/manifests/cis-network-policy.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-deny-ingress + namespace: default + spec: + podSelector: {} + policyTypes: + - Ingress + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-all-metrics + namespace: kube-public + spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-all-system + namespace: kube-system + spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress + + # ---------------------------------------------------------------- + # 5. Service Account Hardening + # ---------------------------------------------------------------- + - path: /var/lib/rancher/rke2/server/manifests/cis-sa-config.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + namespace: default + automountServiceAccountToken: false + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + namespace: kube-system + automountServiceAccountToken: false + + - path: /var/lib/rancher/rke2/server/manifests/cis-sa-cron.yaml + permissions: '0600' + owner: root:root + content: | + apiVersion: v1 + kind: ServiceAccount + metadata: {name: sa-cleaner, namespace: kube-system} + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: {name: sa-cleaner-role} + rules: + - apiGroups: [""] + resources: ["namespaces", "serviceaccounts"] + verbs: ["get", "list", "patch"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: {name: sa-cleaner-binding} + subjects: [{kind: ServiceAccount, name: sa-cleaner, namespace: kube-system}] + roleRef: {kind: ClusterRole, name: sa-cleaner-role, apiGroup: rbac.authorization.k8s.io} + --- + apiVersion: batch/v1 + kind: CronJob + metadata: + name: sa-cleaner + namespace: kube-system + spec: + schedule: "0 */6 * * *" # Run every 6 hours + jobTemplate: + spec: + template: + spec: + serviceAccountName: sa-cleaner + containers: + - name: cleaner + image: rancher/kubectl:v1.26.0 + command: + - /bin/bash + - -c + - | + # Get all namespaces + for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do + # Check if default SA has automount=true (or null) + automount=$(kubectl get sa default -n $ns -o jsonpath='{.automountServiceAccountToken}') + if [ "$automount" != "false" ]; then + echo "Securing default SA in namespace: $ns" + kubectl patch sa default -n $ns -p '{"automountServiceAccountToken": false}' + fi + done + restartPolicy: OnFailure + + # ---------------------------------------------------------------- + # 6. 
OS Sysctls Hardening + # ---------------------------------------------------------------- + - path: /etc/sysctl.d/60-rke2-cis.conf + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.max_map_count=65530 + vm.panic_on_oom=0 + fs.inotify.max_user_watches=1048576 + fs.inotify.max_user_instances=8192 + kernel.panic=10 + kernel.panic_on_oops=1 + net.ipv4.conf.all.rp_filter=1 + net.ipv4.conf.default.rp_filter=1 + net.ipv4.conf.all.accept_source_route=0 + net.ipv4.conf.default.accept_source_route=0 + net.ipv4.conf.all.accept_redirects=0 + net.ipv4.conf.default.accept_redirects=0 + net.ipv4.conf.all.send_redirects=0 + net.ipv4.conf.default.send_redirects=0 + net.ipv4.conf.all.log_martians=1 + net.ipv4.conf.default.log_martians=1 + net.ipv4.icmp_echo_ignore_broadcasts=1 + net.ipv4.icmp_ignore_bogus_error_responses=1 + net.ipv6.conf.all.disable_ipv6=1 + net.ipv6.conf.default.disable_ipv6=1 + fs.protected_hardlinks=1 + fs.protected_symlinks=1 + + # ---------------------------------------------------------------- + # 7. Environment & Setup Scripts + # ---------------------------------------------------------------- + - path: /etc/profile.d/rke2.sh + permissions: '0644' + content: | + export PATH=$PATH:/var/lib/rancher/rke2/bin:/opt/rke2/bin + export KUBECONFIG=/etc/rancher/rke2/rke2.yaml + + + - path: /root/updates.sh + permissions: '0550' + content: | + #!/bin/bash + export DEBIAN_FRONTEND=noninteractive + apt-mark hold linux-headers-generic + apt-mark hold linux-headers-virtual + apt-mark hold linux-image-virtual + apt-mark hold linux-virtual + apt-get update + apt-get upgrade -y + apt-get autoremove -y + + users: + - name: rancher + gecos: Rancher service account + hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3. + lock_passwd: false + shell: /bin/bash + groups: [users, sudo, docker] + sudo: ALL=(ALL:ALL) ALL + ssh_authorized_keys: + - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s' + + - name: etcd + gecos: "etcd user" + shell: /sbin/nologin + system: true + lock_passwd: true + + disable_root: true + ssh_pwauth: true + + runcmd: + - systemctl enable --now qemu-guest-agent + - sysctl --system + - /root/updates.sh + # Immediate run of fix script + - /usr/local/bin/fix-cni-perms.sh + + final_message: | + VI_CNV_CLOUD_INIT has been applied successfully. + Node ready for Rancher! 
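+
+# ----------------------------------------------------------------
+# Everything below mirrors the rancher-cluster-templates chart values.
+# The `_defaults` block above is not consumed by the chart itself: the operator
+# parses it via GetDefaults() (internal/templates/harvester/embed.go), and the
+# &userData anchor it defines is reused by the nodepools below via *userData.
+# ----------------------------------------------------------------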
+ +# amazonec2, azure, digitalocean, harvester, vsphere, custom +cloudprovider: harvester + +# cloud provider credentials +cloudCredentialSecretName: cc-mrklm + +# rancher manager url +rancher: + cattle: + url: rancher-mgmt.product.lan + +# cluster values +cluster: + + name: default-cluster + # labels: + # key: value + config: + kubernetesVersion: v1.33.5+rke2r1 + enableNetworkPolicy: true + localClusterAuthEndpoint: + enabled: false + chartValues: + harvester-cloud-provider: + global: + cattle: + clusterName: default-cluster + + # Pod Security Standard (Replaces PSP) + defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted" + + globalConfig: + systemDefaultRegistry: docker.io + cni: canal + docker: false + disable_scheduler: false + disable_cloud_controller: false + disable_kube_proxy: false + etcd_expose_metrics: false + profile: 'cis' + selinux: false + secrets_encryption: true + write_kubeconfig_mode: 0600 + use_service_account_credentials: false + protect_kernel_defaults: true + cloud_provider_name: harvester + cloud_provider_config: secret://fleet-default:harvesterconfigzswmd + + kube_apiserver_arg: + - "service-account-extend-token-expiration=false" + - "anonymous-auth=false" + - "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs" + - "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml" + - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml" + - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log" + - "audit-log-maxage=30" + - "audit-log-maxbackup=10" + - "audit-log-maxsize=100" + + kubelet_arg: + # Strong Ciphers (CIS 4.2.12) + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + # PID Limit (CIS 4.2.13) + - "pod-max-pids=4096" + # Seccomp Default (CIS 4.2.14) + - "seccomp-default=true" + - "protect-kernel-defaults=true" + - "make-iptables-util-chains=true" + + upgradeStrategy: + controlPlaneConcurrency: 10% + controlPlaneDrainOptions: + enabled: false + workerConcurrency: 10% + workerDrainOptions: + enabled: false +addons: + monitoring: + enabled: false + logging: + enabled: false + longhorn: + enabled: false + neuvector: + enabled: false + +# node and nodepool(s) values +# ---------------------------------------------------------------- +# MANUAL TESTING SECTION +# The Operator will DELETE and OVERWRITE this section at runtime. +# These values are only used if you run 'helm install' manually. 
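+#
+# A minimal manual run could look like this (chart name and version are taken
+# from _defaults.helmChart above; treat this as an untested sketch):
+#
+#   helm install my-cluster oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates \
+#     --version 0.7.2 -f values.yaml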
+# ---------------------------------------------------------------- +nodepools: + - name: control-plane-nodes + displayName: cp-nodes + quantity: 1 + etcd: true + controlplane: true + worker: false + paused: false + cpuCount: 4 + diskSize: 40 + imageName: vanderlande/image-qhtpc + memorySize: 8 + networkName: vanderlande/vm-lan + sshUser: rancher + vmNamespace: vanderlande + userData: *userData + + - name: worker-nodes + displayName: wk-nodes + quantity: 2 + etcd: false + controlplane: false + worker: true + paused: false + cpuCount: 2 + diskSize: 40 + imageName: vanderlande/image-qmx5q + memorySize: 8 + networkName: vanderlande/vm-lan + sshUser: rancher + vmNamespace: vanderlande + userData: *userData + diff --git a/deploy/rig-operator/internal/templates/vsphere/embed.go b/deploy/rig-operator/internal/templates/vsphere/embed.go new file mode 100644 index 0000000..db5a8b5 --- /dev/null +++ b/deploy/rig-operator/internal/templates/vsphere/embed.go @@ -0,0 +1,77 @@ +package vsphere + +import ( + _ "embed" + "fmt" + + "gopkg.in/yaml.v3" +) + +//go:embed values.yaml +var valuesYAML []byte + +type Defaults struct { + CP_CPU int + CP_Mem int + CP_Disk int + + ChartRepo string + ChartName string + ChartVersion string + + UserData string +} + +// GetDefaults parses the embedded values.yaml to extract global settings +func GetDefaults() (Defaults, error) { + var raw map[string]interface{} + if err := yaml.Unmarshal(valuesYAML, &raw); err != nil { + return Defaults{}, fmt.Errorf("failed to parse vsphere base values: %w", err) + } + + // 1. Set Hardcoded Fallbacks (Safety Net) + d := Defaults{ + CP_CPU: 2, CP_Mem: 4, CP_Disk: 40, // vSphere might need different defaults than Harvester + } + + // 2. Read from _defaults block + if defs, ok := raw["_defaults"].(map[string]interface{}); ok { + + // Profile Defaults + if cp, ok := defs["controlPlaneProfile"].(map[string]interface{}); ok { + if v, ok := cp["cpuCores"].(int); ok { + d.CP_CPU = v + } + if v, ok := cp["memoryGb"].(int); ok { + d.CP_Mem = v + } + if v, ok := cp["diskGb"].(int); ok { + d.CP_Disk = v + } + } + + // Helm Chart Defaults + if chart, ok := defs["helmChart"].(map[string]interface{}); ok { + if v, ok := chart["repo"].(string); ok { + d.ChartRepo = v + } + if v, ok := chart["name"].(string); ok { + d.ChartName = v + } + if v, ok := chart["version"].(string); ok { + d.ChartVersion = v + } + } + + // UserData Default + if v, ok := defs["userData"].(string); ok { + d.UserData = v + } + } + return d, nil +} + +// GetBaseValues returns the raw bytes for the MasterBuilder +func GetBaseValues() []byte { + return valuesYAML +} diff --git a/deploy/rig-operator/internal/templates/vsphere/values.yaml b/deploy/rig-operator/internal/templates/vsphere/values.yaml new file mode 100644 index 0000000..1adb15c --- /dev/null +++ b/deploy/rig-operator/internal/templates/vsphere/values.yaml @@ -0,0 +1,202 @@ +# ---------------------------------------------------------------- +# BASE TEMPLATE (internal/templates/base_values.yaml) +# ---------------------------------------------------------------- + +_defaults: + helmChart: + repo: "" + name: "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates" + version: "0.7.2" + controlPlaneProfile: + cpuCores: 4 + memoryGb: 8 + diskGb: 40 + userData: &userData | + #cloud-config + package_update: false + package_upgrade: false + snap: + commands: + 00: snap refresh --hold=forever + package_reboot_if_required: true + packages: + - yq + - jq + + disable_root: true + ssh_pwauth: false + + write_files: + - path: 
/root/updates.sh
+        permissions: '0550'
+        content: |
+          #!/bin/bash
+          export DEBIAN_FRONTEND=noninteractive
+          apt-mark hold linux-headers-generic
+          apt-mark hold linux-headers-virtual
+          apt-mark hold linux-image-virtual
+          apt-mark hold linux-virtual
+          apt-get update
+          apt-get upgrade -y
+          apt-get autoremove -y
+
+    users:
+      - name: rancher
+        gecos: Rancher service account
+        hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3.
+        lock_passwd: false
+        shell: /bin/bash
+        groups: [users, sudo, docker]
+        sudo: ALL=(ALL:ALL) ALL
+        ssh_authorized_keys:
+          - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s'
+
+    # disable_root and ssh_pwauth are configured once near the top of this
+    # cloud-config (before write_files); YAML does not allow repeated keys.
+
+    runcmd:
+      # - systemctl enable --now qemu-guest-agent
+      - sysctl --system
+      - /root/updates.sh
+
+    bootcmd:
+      # networking.sh is not written by this cloud-config; it is expected to be
+      # baked into the VM template referenced by cloneFrom below.
+      - sudo bash /root/networking.sh
+
+    final_message: |
+      VI_CNV_CLOUD_INIT has been applied successfully.
+      Node ready for Rancher!
+
+# amazonec2, azure, digitalocean, harvester, vsphere, custom
+cloudprovider: vsphere
+
+# cloud provider credentials
+cloudCredentialSecretName: cc-lhtl9
+
+# rancher manager url
+rancher:
+  cattle:
+    url: rancher.tst.vanderlande.com
+
+# cluster values
+cluster:
+
+  name: default-cluster-005
+  # labels:
+  #   key: value
+  config:
+    kubernetesVersion: v1.31.12+rke2r1
+    enableNetworkPolicy: true
+    localClusterAuthEndpoint:
+      enabled: false
+
+    # Pod Security Standard (Replaces PSP)
+    # defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted"
+
+    globalConfig:
+      systemDefaultRegistry: docker.io
+      cni: canal
+      docker: false
+      disable_scheduler: false
+      disable_cloud_controller: false
+      disable_kube_proxy: false
+      etcd_expose_metrics: false
+      profile: ''
+      selinux: false
+      secrets_encryption: false
+      write_kubeconfig_mode: 0600
+      use_service_account_credentials: false
+      protect_kernel_defaults: false
+      cloud_provider_name: ''
+
+      # kube_apiserver_arg:
+      #   - "service-account-extend-token-expiration=false"
+      #   - "anonymous-auth=false"
+      #   - "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs"
+      #   - "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml"
+      #   - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml"
+      #   - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log"
+      #   - "audit-log-maxage=30"
+      #   - "audit-log-maxbackup=10"
+      #   - "audit-log-maxsize=100"
+
+      # kubelet_arg:
+      #   # Strong Ciphers (CIS 4.2.12)
+      #   - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+      #   # PID Limit (CIS 4.2.13)
+      #   - "pod-max-pids=4096"
+      #   # Seccomp Default (CIS 4.2.14)
+      #   - "seccomp-default=true"
+      #   - "protect-kernel-defaults=true"
+      #   - "make-iptables-util-chains=true"
+
+    upgradeStrategy:
+      controlPlaneConcurrency: 10%
+      controlPlaneDrainOptions:
+        enabled: false
+      workerConcurrency: 10%
+      workerDrainOptions:
+        enabled: false
+addons:
+  monitoring:
+    enabled: false
+  logging:
+    enabled: false
+  longhorn:
+    enabled: true
+  neuvector:
+    enabled: false
+
+# node and nodepool(s) values
+# ----------------------------------------------------------------
+# MANUAL TESTING SECTION
+# The Operator will DELETE and OVERWRITE this section at runtime.
+# These values are only used if you run 'helm install' manually.
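+#
+# NOTE: sizes in this section are already in the chart's native units (MB);
+# when the operator generates this section it converts blueprint GB values to
+# MB in GenerateNodePools (internal/provider/vsphere/strategy.go).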
+# ----------------------------------------------------------------
+nodepools:
+  - name: control-plane-nodes
+    displayName: cp-nodes
+    quantity: 1
+    etcd: true
+    controlplane: true
+    worker: false
+    paused: false
+    # VSPHERE SPECIFIC FIELDS
+    cpuCount: 2
+    memorySize: 8192
+    diskSize: 40000
+    vcenter: "vcenter.vanderlande.com"
+    datacenter: "NL001"
+    folder: "ICT Digitalisation - Rancher"
+    pool: "NL001 Development - Rancher/Resources"
+    datastoreCluster: "NL001 Development - Rancher SDRS" # Storage DRS (SDRS) datastore cluster
+    network:
+      - "nl001.vDS.Distri.Vlan.1542"
+    # Provisioning Source
+    creationType: "template"
+    cloneFrom: "nl001-cp-ubuntu-22.04-amd64-20250327-5.15.0-135-rke2-k3s"
+    cloudConfig: *userData # Reuses the &userData anchor defined in _defaults above
+
+  - name: worker-storage-nodes
+    displayName: wk-nodes
+    quantity: 2
+    etcd: false
+    controlplane: false
+    worker: true
+    paused: false
+    # VSPHERE SPECIFIC FIELDS
+    cpuCount: 4
+    memorySize: 8192
+    diskSize: 100000
+    vcenter: "vcenter.vanderlande.com"
+    datacenter: "NL001"
+    folder: "ICT Digitalisation - Rancher"
+    pool: "NL001 Development - Rancher/Resources"
+    datastoreCluster: "NL001 Development - Rancher SDRS" # Storage DRS (SDRS) datastore cluster
+    network:
+      - "nl001.vDS.Distri.Vlan.1542"
+    # Provisioning Source
+    creationType: "template"
+    cloneFrom: "nl001-cp-ubuntu-22.04-amd64-20250327-5.15.0-135-rke2-k3s"
+    cloudConfig: *userData # Reuses the &userData anchor defined in _defaults above
\ No newline at end of file
diff --git a/deploy/rig-operator/misc/patch-default-sa b/deploy/rig-operator/misc/patch-default-sa
new file mode 100755
index 0000000..4408581
--- /dev/null
+++ b/deploy/rig-operator/misc/patch-default-sa
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+# Copy this script into /etc/cron.daily/
+# and make it executable: chmod 0755
+
+# TODO: Update path to actual kubeconfig
+export KUBECONFIG=/home/rancher/.kube/config
+
+for n in $(kubectl get namespaces -o=jsonpath="{.items[*]['metadata.name']}"); do
+  kubectl patch serviceaccount default -p '{"automountServiceAccountToken": false}' -n "$n"
+done
diff --git a/deploy/rig-operator/test/e2e/e2e_suite_test.go b/deploy/rig-operator/test/e2e/e2e_suite_test.go
new file mode 100644
index 0000000..f62e9fc
--- /dev/null
+++ b/deploy/rig-operator/test/e2e/e2e_suite_test.go
@@ -0,0 +1,92 @@
+//go:build e2e
+// +build e2e
+
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"vanderlande.com/ittp/appstack/rig-operator/test/utils"
+)
+
+var (
+	// Optional Environment Variables:
+	// - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup.
+	//   This is useful if CertManager is already installed, avoiding
+	//   re-installation and conflicts.
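+	//
+	// The suite is guarded by the `e2e` build tag (see the top of this file), so a
+	// typical local invocation looks something like:
+	//
+	//   go test -tags e2e ./test/e2e/ -v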
+	skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true"
+	// isCertManagerAlreadyInstalled will be set to true when CertManager CRDs are found on the cluster
+	isCertManagerAlreadyInstalled = false
+
+	// projectImage is the name of the image which will be built and loaded
+	// with the source code changes to be tested.
+	projectImage = "example.com/deploy:v0.0.1"
+)
+
+// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated,
+// temporary environment to validate project changes with the purpose of being used in CI jobs.
+// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs
+// CertManager.
+func TestE2E(t *testing.T) {
+	RegisterFailHandler(Fail)
+	_, _ = fmt.Fprintf(GinkgoWriter, "Starting deploy integration test suite\n")
+	RunSpecs(t, "e2e suite")
+}
+
+var _ = BeforeSuite(func() {
+	By("building the manager(Operator) image")
+	cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
+	_, err := utils.Run(cmd)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image")
+
+	// TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is
+	// built and available before running the tests. Also, remove the following block.
+	By("loading the manager(Operator) image on Kind")
+	err = utils.LoadImageToKindClusterWithName(projectImage)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind")
+
+	// The e2e tests are intended to run on a temporary cluster that is created and destroyed for testing.
+	// To prevent errors when tests run in environments with CertManager already installed,
+	// we check for its presence before execution.
+	// Setup CertManager before the suite if not skipped and if not already installed
+	if !skipCertManagerInstall {
+		By("checking if cert manager is installed already")
+		isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled()
+		if !isCertManagerAlreadyInstalled {
+			_, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n")
+			Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager")
+		} else {
+			_, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n")
+		}
+	}
+})
+
+var _ = AfterSuite(func() {
+	// Teardown CertManager after the suite if not skipped and if it was not already installed
+	if !skipCertManagerInstall && !isCertManagerAlreadyInstalled {
+		_, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n")
+		utils.UninstallCertManager()
+	}
+})
diff --git a/deploy/rig-operator/test/e2e/e2e_test.go b/deploy/rig-operator/test/e2e/e2e_test.go
new file mode 100644
index 0000000..6e581a9
--- /dev/null
+++ b/deploy/rig-operator/test/e2e/e2e_test.go
@@ -0,0 +1,337 @@
+//go:build e2e
+// +build e2e
+
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"vanderlande.com/ittp/appstack/rig-operator/test/utils"
+)
+
+// namespace where the project is deployed
+const namespace = "deploy-system"
+
+// serviceAccountName created for the project
+const serviceAccountName = "deploy-controller-manager"
+
+// metricsServiceName is the name of the metrics service of the project
+const metricsServiceName = "deploy-controller-manager-metrics-service"
+
+// metricsRoleBindingName is the name of the RBAC binding that will be created to allow access to the metrics data
+const metricsRoleBindingName = "deploy-metrics-binding"
+
+var _ = Describe("Manager", Ordered, func() {
+	var controllerPodName string
+
+	// Before running the tests, set up the environment by creating the namespace,
+	// enforcing the restricted security policy on the namespace, installing CRDs,
+	// and deploying the controller.
+	BeforeAll(func() {
+		By("creating manager namespace")
+		cmd := exec.Command("kubectl", "create", "ns", namespace)
+		_, err := utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred(), "Failed to create namespace")
+
+		By("labeling the namespace to enforce the restricted security policy")
+		cmd = exec.Command("kubectl", "label", "--overwrite", "ns", namespace,
+			"pod-security.kubernetes.io/enforce=restricted")
+		_, err = utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred(), "Failed to label namespace with restricted policy")
+
+		By("installing CRDs")
+		cmd = exec.Command("make", "install")
+		_, err = utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs")
+
+		By("deploying the controller-manager")
+		cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage))
+		_, err = utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager")
+	})
+
+	// After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs,
+	// and deleting the namespace.
+	AfterAll(func() {
+		By("cleaning up the curl pod for metrics")
+		cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace)
+		_, _ = utils.Run(cmd)
+
+		By("undeploying the controller-manager")
+		cmd = exec.Command("make", "undeploy")
+		_, _ = utils.Run(cmd)
+
+		By("uninstalling CRDs")
+		cmd = exec.Command("make", "uninstall")
+		_, _ = utils.Run(cmd)
+
+		By("removing manager namespace")
+		cmd = exec.Command("kubectl", "delete", "ns", namespace)
+		_, _ = utils.Run(cmd)
+	})
+
+	// After each test, check for failures and collect logs, events,
+	// and pod descriptions for debugging.
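+	// Note: controllerPodName is populated by the first spec ("should run successfully");
+	// if that spec never ran, the kubectl calls below simply error and the failure
+	// is reported to the Ginkgo writer instead.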
+ AfterEach(func() { + specReport := CurrentSpecReport() + if specReport.Failed() { + By("Fetching controller manager pod logs") + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) + controllerLogs, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err) + } + + By("Fetching Kubernetes events") + cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp") + eventsOutput, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err) + } + + By("Fetching curl-metrics logs") + cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) + metricsOutput, err := utils.Run(cmd) + if err == nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput) + } else { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err) + } + + By("Fetching controller manager pod description") + cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace) + podDescription, err := utils.Run(cmd) + if err == nil { + fmt.Println("Pod description:\n", podDescription) + } else { + fmt.Println("Failed to describe controller pod") + } + } + }) + + SetDefaultEventuallyTimeout(2 * time.Minute) + SetDefaultEventuallyPollingInterval(time.Second) + + Context("Manager", func() { + It("should run successfully", func() { + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func(g Gomega) { + // Get the name of the controller-manager pod + cmd := exec.Command("kubectl", "get", + "pods", "-l", "control-plane=controller-manager", + "-o", "go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", namespace, + ) + + podOutput, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information") + podNames := utils.GetNonEmptyLines(podOutput) + g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running") + controllerPodName = podNames[0] + g.Expect(controllerPodName).To(ContainSubstring("controller-manager")) + + // Validate the pod's status + cmd = exec.Command("kubectl", "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", namespace, + ) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status") + } + Eventually(verifyControllerUp).Should(Succeed()) + }) + + It("should ensure the metrics endpoint is serving metrics", func() { + By("creating a ClusterRoleBinding for the service account to allow access to metrics") + cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName, + "--clusterrole=deploy-metrics-reader", + fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName), + ) + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding") + + By("validating that the metrics service is available") + cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") + + By("getting the service account token") + token, err := 
serviceAccountToken() + Expect(err).NotTo(HaveOccurred()) + Expect(token).NotTo(BeEmpty()) + + By("ensuring the controller pod is ready") + verifyControllerPodReady := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "pod", controllerPodName, "-n", namespace, + "-o", "jsonpath={.status.conditions[?(@.type=='Ready')].status}") + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("True"), "Controller pod not ready") + } + Eventually(verifyControllerPodReady, 3*time.Minute, time.Second).Should(Succeed()) + + By("verifying that the controller manager is serving the metrics server") + verifyMetricsServerStarted := func(g Gomega) { + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(ContainSubstring("Serving metrics server"), + "Metrics server not yet started") + } + Eventually(verifyMetricsServerStarted, 3*time.Minute, time.Second).Should(Succeed()) + + // +kubebuilder:scaffold:e2e-metrics-webhooks-readiness + + By("creating the curl-metrics pod to access the metrics endpoint") + cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", + "--namespace", namespace, + "--image=curlimages/curl:latest", + "--overrides", + fmt.Sprintf(`{ + "spec": { + "containers": [{ + "name": "curl", + "image": "curlimages/curl:latest", + "command": ["/bin/sh", "-c"], + "args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"], + "securityContext": { + "readOnlyRootFilesystem": true, + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": ["ALL"] + }, + "runAsNonRoot": true, + "runAsUser": 1000, + "seccompProfile": { + "type": "RuntimeDefault" + } + } + }], + "serviceAccountName": "%s" + } + }`, token, metricsServiceName, namespace, serviceAccountName)) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") + + By("waiting for the curl-metrics pod to complete.") + verifyCurlUp := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "pods", "curl-metrics", + "-o", "jsonpath={.status.phase}", + "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status") + } + Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) + + By("getting the metrics by checking curl-metrics logs") + verifyMetricsAvailable := func(g Gomega) { + metricsOutput, err := getMetricsOutput() + g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") + g.Expect(metricsOutput).NotTo(BeEmpty()) + g.Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK")) + } + Eventually(verifyMetricsAvailable, 2*time.Minute).Should(Succeed()) + }) + + // +kubebuilder:scaffold:e2e-webhooks-checks + + // TODO: Customize the e2e test suite with scenarios specific to your project. + // Consider applying sample/CR(s) and check their status and/or verifying + // the reconciliation by using the metrics, i.e.: + // metricsOutput, err := getMetricsOutput() + // Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") + // Expect(metricsOutput).To(ContainSubstring( + // fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`, + // strings.ToLower(), + // )) + }) +}) + +// serviceAccountToken returns a token for the specified service account in the given namespace. 
+// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request +// and parsing the resulting token from the API response. +func serviceAccountToken() (string, error) { + const tokenRequestRawString = `{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenRequest" + }` + + // Temporary file to store the token request + secretName := fmt.Sprintf("%s-token-request", serviceAccountName) + tokenRequestFile := filepath.Join("/tmp", secretName) + err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644)) + if err != nil { + return "", err + } + + var out string + verifyTokenCreation := func(g Gomega) { + // Execute kubectl command to create the token + cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf( + "/api/v1/namespaces/%s/serviceaccounts/%s/token", + namespace, + serviceAccountName, + ), "-f", tokenRequestFile) + + output, err := cmd.CombinedOutput() + g.Expect(err).NotTo(HaveOccurred()) + + // Parse the JSON output to extract the token + var token tokenRequest + err = json.Unmarshal(output, &token) + g.Expect(err).NotTo(HaveOccurred()) + + out = token.Status.Token + } + Eventually(verifyTokenCreation).Should(Succeed()) + + return out, err +} + +// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint. +func getMetricsOutput() (string, error) { + By("getting the curl-metrics logs") + cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) + return utils.Run(cmd) +} + +// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response, +// containing only the token field that we need to extract. +type tokenRequest struct { + Status struct { + Token string `json:"token"` + } `json:"status"` +} diff --git a/deploy/rig-operator/test/utils/utils.go b/deploy/rig-operator/test/utils/utils.go new file mode 100644 index 0000000..495bc7f --- /dev/null +++ b/deploy/rig-operator/test/utils/utils.go @@ -0,0 +1,226 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strings" + + . 
"github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck +) + +const ( + certmanagerVersion = "v1.19.1" + certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml" + + defaultKindBinary = "kind" + defaultKindCluster = "kind" +) + +func warnError(err error) { + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) (string, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err) + } + + return string(output), nil +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } + + // Delete leftover leases in kube-system (not cleaned by default) + kubeSystemLeases := []string{ + "cert-manager-cainjector-leader-election", + "cert-manager-controller", + } + for _, lease := range kubeSystemLeases { + cmd = exec.Command("kubectl", "delete", "lease", lease, + "-n", "kube-system", "--ignore-not-found", "--force", "--grace-period=0") + if _, err := Run(cmd); err != nil { + warnError(err) + } + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "apply", "-f", url) + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. + cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + _, err := Run(cmd) + return err +} + +// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed +// by verifying the existence of key CRDs related to Cert Manager. 
+func IsCertManagerCRDsInstalled() bool {
+	// List of common Cert Manager CRDs
+	certManagerCRDs := []string{
+		"certificates.cert-manager.io",
+		"issuers.cert-manager.io",
+		"clusterissuers.cert-manager.io",
+		"certificaterequests.cert-manager.io",
+		"orders.acme.cert-manager.io",
+		"challenges.acme.cert-manager.io",
+	}
+
+	// Execute the kubectl command to get all CRDs
+	cmd := exec.Command("kubectl", "get", "crds")
+	output, err := Run(cmd)
+	if err != nil {
+		return false
+	}
+
+	// Check if any of the Cert Manager CRDs are present
+	crdList := GetNonEmptyLines(output)
+	for _, crd := range certManagerCRDs {
+		for _, line := range crdList {
+			if strings.Contains(line, crd) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// LoadImageToKindClusterWithName loads a local docker image to the kind cluster
+func LoadImageToKindClusterWithName(name string) error {
+	cluster := defaultKindCluster
+	if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
+		cluster = v
+	}
+	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
+	kindBinary := defaultKindBinary
+	if v, ok := os.LookupEnv("KIND"); ok {
+		kindBinary = v
+	}
+	cmd := exec.Command(kindBinary, kindOptions...)
+	_, err := Run(cmd)
+	return err
+}
+
+// GetNonEmptyLines splits the given command output into individual lines,
+// ignoring empty ones.
+func GetNonEmptyLines(output string) []string {
+	var res []string
+	elements := strings.Split(output, "\n")
+	for _, element := range elements {
+		if element != "" {
+			res = append(res, element)
+		}
+	}
+
+	return res
+}
+
+// GetProjectDir returns the project root directory
+func GetProjectDir() (string, error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return wd, fmt.Errorf("failed to get current working directory: %w", err)
+	}
+	wd = strings.ReplaceAll(wd, "/test/e2e", "")
+	return wd, nil
+}
+
+// UncommentCode searches for target in the file and removes the comment prefix
+// of the target content. The target content may span multiple lines.
+func UncommentCode(filename, target, prefix string) error {
+	// false positive
+	// nolint:gosec
+	content, err := os.ReadFile(filename)
+	if err != nil {
+		return fmt.Errorf("failed to read file %q: %w", filename, err)
+	}
+	strContent := string(content)
+
+	idx := strings.Index(strContent, target)
+	if idx < 0 {
+		return fmt.Errorf("unable to find the code %q to be uncommented", target)
+	}
+
+	out := new(bytes.Buffer)
+	_, err = out.Write(content[:idx])
+	if err != nil {
+		return fmt.Errorf("failed to write to output: %w", err)
+	}
+
+	scanner := bufio.NewScanner(bytes.NewBufferString(target))
+	if !scanner.Scan() {
+		return nil
+	}
+	for {
+		if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil {
+			return fmt.Errorf("failed to write to output: %w", err)
+		}
+		// Avoid writing a newline in case the previous line was the last in target.
+ if !scanner.Scan() { + break + } + if _, err = out.WriteString("\n"); err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + } + + if _, err = out.Write(content[idx+len(target):]); err != nil { + return fmt.Errorf("failed to write to output: %w", err) + } + + // false positive + // nolint:gosec + if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil { + return fmt.Errorf("failed to write file %q: %w", filename, err) + } + + return nil +} diff --git a/docs/TEST.md b/docs/TEST.md new file mode 100644 index 0000000..9f0ae2f --- /dev/null +++ b/docs/TEST.md @@ -0,0 +1,9 @@ + + + + +# {{ release_version }} - TEST + +TEST123 - TEST123 + +--- IT WILL BE REMOVED BEFORE MAIN MERGE --- diff --git a/docs/_changelog.md b/docs/_changelog.md new file mode 100644 index 0000000..499c2bb --- /dev/null +++ b/docs/_changelog.md @@ -0,0 +1,9 @@ + + + + +# {{ release_version }} - Changelog + + + +