Drop initial code
deploy/rig-operator/.devcontainer/devcontainer.json (new file, 25 lines)
@@ -0,0 +1,25 @@
{
  "name": "Kubebuilder DevContainer",
  "image": "golang:1.24",
  "features": {
    "ghcr.io/devcontainers/features/docker-in-docker:2": {},
    "ghcr.io/devcontainers/features/git:1": {}
  },

  "runArgs": ["--network=host"],

  "customizations": {
    "vscode": {
      "settings": {
        "terminal.integrated.shell.linux": "/bin/bash"
      },
      "extensions": [
        "ms-kubernetes-tools.vscode-kubernetes-tools",
        "ms-azuretools.vscode-docker"
      ]
    }
  },

  "onCreateCommand": "bash .devcontainer/post-install.sh"
}

deploy/rig-operator/.devcontainer/post-install.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash
set -x

curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH)
chmod +x ./kind
mv ./kind /usr/local/bin/kind

curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/$(go env GOARCH)
chmod +x kubebuilder
mv kubebuilder /usr/local/bin/

KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt)
curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/$(go env GOARCH)/kubectl"
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl

docker network create -d=bridge --subnet=172.19.0.0/24 kind

kind version
kubebuilder version
docker --version
go version
kubectl version --client

deploy/rig-operator/.dockerignore (new file, 11 lines)
@@ -0,0 +1,11 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore everything by default and re-include only needed files
**

# Re-include Go source files (but not *_test.go)
!**/*.go
**/*_test.go

# Re-include Go module files
!go.mod
!go.sum

deploy/rig-operator/.github/workflows/lint.yml (new file, vendored, 23 lines)
@@ -0,0 +1,23 @@
name: Lint

on:
  push:
  pull_request:

jobs:
  lint:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Run linter
        uses: golangci/golangci-lint-action@v8
        with:
          version: v2.5.0

deploy/rig-operator/.github/workflows/test-e2e.yml (new file, vendored, 32 lines)
@@ -0,0 +1,32 @@
name: E2E Tests

on:
  push:
  pull_request:

jobs:
  test-e2e:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install the latest version of kind
        run: |
          curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH)
          chmod +x ./kind
          sudo mv ./kind /usr/local/bin/kind

      - name: Verify kind installation
        run: kind version

      - name: Run e2e tests
        run: |
          go mod tidy
          make test-e2e

deploy/rig-operator/.github/workflows/test.yml (new file, vendored, 23 lines)
@@ -0,0 +1,23 @@
name: Tests

on:
  push:
  pull_request:

jobs:
  test:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Run tests
        run: |
          go mod tidy
          make test

deploy/rig-operator/.gitignore (new file, vendored, 30 lines)
@@ -0,0 +1,30 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin/*
Dockerfile.cross

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Go workspace file
go.work

# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*

# editor and IDE paraphernalia
.idea
.vscode
*.swp
*.swo
*~

# Kubeconfig might contain secrets
*.kubeconfig

deploy/rig-operator/.golangci.yml (new file, 52 lines)
@@ -0,0 +1,52 @@
version: "2"
run:
  allow-parallel-runners: true
linters:
  default: none
  enable:
    - copyloopvar
    - dupl
    - errcheck
    - ginkgolinter
    - goconst
    - gocyclo
    - govet
    - ineffassign
    - lll
    - misspell
    - nakedret
    - prealloc
    - revive
    - staticcheck
    - unconvert
    - unparam
    - unused
  settings:
    revive:
      rules:
        - name: comment-spacings
        - name: import-shadowing
  exclusions:
    generated: lax
    rules:
      - linters:
          - lll
        path: api/*
      - linters:
          - dupl
          - lll
        path: internal/*
    paths:
      - third_party$
      - builtin$
      - examples$
formatters:
  enable:
    - gofmt
    - goimports
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$

deploy/rig-operator/Dockerfile (new file, 31 lines)
@@ -0,0 +1,31 @@
# Build the manager binary
FROM golang:1.24 AS builder
ARG TARGETOS
ARG TARGETARCH

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the Go source (relies on .dockerignore to filter)
COPY . .

# Build
# GOARCH has no default value so that the binary is built for the platform of the host where the
# command was called. For example, if we call make docker-build in a local env with an Apple Silicon
# M1 SoC, the docker BUILDPLATFORM arg will be linux/arm64, while for Apple x86 it will be linux/amd64.
# By leaving it empty we ensure that the container and the binary shipped in it share the same platform.
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532

ENTRYPOINT ["/manager"]

deploy/rig-operator/Makefile (new file, 250 lines)
@@ -0,0 +1,250 @@
# Image URL to use all building/pushing image targets
IMG ?= controller:latest

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif

# CONTAINER_TOOL defines the container tool to be used for building images.
# Be aware that the target commands are only tested with Docker, which is
# scaffolded by default. However, you might want to replace it with other
# tools (i.e. podman).
CONTAINER_TOOL ?= docker

# Setting SHELL to bash allows bash commands to be executed by recipes.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec

.PHONY: all
all: build

##@ General

# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php

.PHONY: help
help: ## Display this help.
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

##@ Development

.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
	"$(CONTROLLER_GEN)" rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases

.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
	"$(CONTROLLER_GEN)" object:headerFile="hack/boilerplate.go.txt" paths="./..."

.PHONY: fmt
fmt: ## Run go fmt against code.
	go fmt ./...

.PHONY: vet
vet: ## Run go vet against code.
	go vet ./...

.PHONY: test
test: manifests generate fmt vet setup-envtest ## Run tests.
	KUBEBUILDER_ASSETS="$(shell "$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out

# TODO(user): To use a different vendor for e2e tests, modify the setup under 'test/e2e'.
# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally.
# CertManager is installed by default; skip with:
# - CERT_MANAGER_INSTALL_SKIP=true
KIND_CLUSTER ?= deploy-test-e2e

.PHONY: setup-test-e2e
setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist
	@command -v $(KIND) >/dev/null 2>&1 || { \
		echo "Kind is not installed. Please install Kind manually."; \
		exit 1; \
	}
	@case "$$($(KIND) get clusters)" in \
		*"$(KIND_CLUSTER)"*) \
			echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \
		*) \
			echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \
			$(KIND) create cluster --name $(KIND_CLUSTER) ;; \
	esac

.PHONY: test-e2e
test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expects an isolated environment using Kind.
	KIND=$(KIND) KIND_CLUSTER=$(KIND_CLUSTER) go test -tags=e2e ./test/e2e/ -v -ginkgo.v
	$(MAKE) cleanup-test-e2e

.PHONY: cleanup-test-e2e
cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests
	@$(KIND) delete cluster --name $(KIND_CLUSTER)

.PHONY: lint
lint: golangci-lint ## Run golangci-lint linter
	"$(GOLANGCI_LINT)" run

.PHONY: lint-fix
lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
	"$(GOLANGCI_LINT)" run --fix

.PHONY: lint-config
lint-config: golangci-lint ## Verify golangci-lint linter configuration
	"$(GOLANGCI_LINT)" config verify

##@ Build

.PHONY: build
build: manifests generate fmt vet ## Build manager binary.
	go build -o bin/manager cmd/main.go

.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
	go run ./cmd/main.go

# If you wish to build the manager image targeting other platforms you can use the --platform flag.
# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it.
# More info: https://docs.docker.com/develop/develop-images/build_enhancements/
.PHONY: docker-build
docker-build: ## Build docker image with the manager.
	$(CONTAINER_TOOL) build -t ${IMG} .

.PHONY: docker-push
docker-push: ## Push docker image with the manager.
	$(CONTAINER_TOOL) push ${IMG}

# PLATFORMS defines the target platforms for the manager image to be built for, to provide support for
# multiple architectures (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:<tag>> then the export will fail)
# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
.PHONY: docker-buildx
docker-buildx: ## Build and push docker image for the manager for cross-platform support
	# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
	sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
	- $(CONTAINER_TOOL) buildx create --name deploy-builder
	$(CONTAINER_TOOL) buildx use deploy-builder
	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
	- $(CONTAINER_TOOL) buildx rm deploy-builder
	rm Dockerfile.cross

.PHONY: build-installer
build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
	mkdir -p dist
	cd config/manager && "$(KUSTOMIZE)" edit set image controller=${IMG}
	"$(KUSTOMIZE)" build config/default > dist/install.yaml

##@ Deployment

ifndef ignore-not-found
  ignore-not-found = false
endif

.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
	@out="$$( "$(KUSTOMIZE)" build config/crd 2>/dev/null || true )"; \
	if [ -n "$$out" ]; then echo "$$out" | "$(KUBECTL)" apply -f -; else echo "No CRDs to install; skipping."; fi

.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
	@out="$$( "$(KUSTOMIZE)" build config/crd 2>/dev/null || true )"; \
	if [ -n "$$out" ]; then echo "$$out" | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f -; else echo "No CRDs to delete; skipping."; fi

.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
	cd config/manager && "$(KUSTOMIZE)" edit set image controller=${IMG}
	"$(KUSTOMIZE)" build config/default | "$(KUBECTL)" apply -f -

.PHONY: undeploy
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
	"$(KUSTOMIZE)" build config/default | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f -

##@ Dependencies

## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
	mkdir -p "$(LOCALBIN)"

## Tool Binaries
KUBECTL ?= kubectl
KIND ?= kind
KUSTOMIZE ?= $(LOCALBIN)/kustomize
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest
GOLANGCI_LINT = $(LOCALBIN)/golangci-lint

## Tool Versions
KUSTOMIZE_VERSION ?= v5.7.1
CONTROLLER_TOOLS_VERSION ?= v0.19.0

# ENVTEST_VERSION is the version of the controller-runtime release branch to fetch the envtest setup script from (i.e. release-0.20)
ENVTEST_VERSION ?= $(shell v='$(call gomodver,sigs.k8s.io/controller-runtime)'; \
	[ -n "$$v" ] || { echo "Set ENVTEST_VERSION manually (controller-runtime replace has no tag)" >&2; exit 1; }; \
	printf '%s\n' "$$v" | sed -E 's/^v?([0-9]+)\.([0-9]+).*/release-\1.\2/')

# ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31)
ENVTEST_K8S_VERSION ?= $(shell v='$(call gomodver,k8s.io/api)'; \
	[ -n "$$v" ] || { echo "Set ENVTEST_K8S_VERSION manually (k8s.io/api replace has no tag)" >&2; exit 1; }; \
	printf '%s\n' "$$v" | sed -E 's/^v?[0-9]+\.([0-9]+).*/1.\1/')

GOLANGCI_LINT_VERSION ?= v2.5.0
.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
$(KUSTOMIZE): $(LOCALBIN)
	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))

.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))

.PHONY: setup-envtest
setup-envtest: envtest ## Download the binaries required for ENVTEST in the local bin directory.
	@echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..."
	@"$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path || { \
		echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \
		exit 1; \
	}

.PHONY: envtest
envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
$(ENVTEST): $(LOCALBIN)
	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))

.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))

# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
# $1 - target path with name of binary
# $2 - package url which can be installed
# $3 - specific version of package
define go-install-tool
@[ -f "$(1)-$(3)" ] && [ "$$(readlink -- "$(1)" 2>/dev/null)" = "$(1)-$(3)" ] || { \
set -e; \
package=$(2)@$(3) ;\
echo "Downloading $${package}" ;\
rm -f "$(1)" ;\
GOBIN="$(LOCALBIN)" go install $${package} ;\
mv "$(LOCALBIN)/$$(basename "$(1)")" "$(1)-$(3)" ;\
} ;\
ln -sf "$$(realpath "$(1)-$(3)")" "$(1)"
endef

# gomodver prints the effective version of a module, honoring replace directives.
define gomodver
$(shell go list -m -f '{{if .Replace}}{{.Replace.Version}}{{else}}{{.Version}}{{end}}' $(1) 2>/dev/null)
endef

deploy/rig-operator/PROJECT (new file, 46 lines)
@@ -0,0 +1,46 @@
# Code generated by tool. DO NOT EDIT.
# This file is used to track the info used to scaffold your project
# and allow the plugins to work properly.
# More info: https://book.kubebuilder.io/reference/project-config.html
cliVersion: 4.10.1
domain: appstack.io
layout:
- go.kubebuilder.io/v4
projectName: deploy
repo: vanderlande.com/ittp/appstack/rig-operator
resources:
- api:
    crdVersion: v1
    namespaced: true
  controller: true
  domain: appstack.io
  group: rig
  kind: ClusterBlueprint
  path: vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1
  version: v1alpha1
- api:
    crdVersion: v1
    namespaced: true
  controller: true
  domain: appstack.io
  group: rig
  kind: InfraBlueprint
  path: vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1
  version: v1alpha1
- api:
    crdVersion: v1
    namespaced: true
  domain: appstack.io
  group: rig
  kind: HarvesterBlueprint
  path: vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1
  version: v1alpha1
- api:
    crdVersion: v1
    namespaced: true
  domain: appstack.io
  group: rig
  kind: VsphereBlueprint
  path: vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1
  version: v1alpha1
version: "3"

deploy/rig-operator/README-DEV.md (new file, 141 lines)
@@ -0,0 +1,141 @@
# RIG Operator (Resource Infrastructure Gateway)

**RIG Operator** is a Kubernetes operator designed to decouple **Infrastructure Management** (quotas, credentials, providers) from **Cluster Provisioning** (Kubernetes versions, node sizing).

It replaces legacy monolithic provisioners with a **strategy-pattern architecture**, allowing a single operator to manage hybrid fleets (Harvester, vSphere, etc.) through a unified API while strictly enforcing resource quotas.

---

## 🏗 Architecture & Design

### The Problem
Legacy controllers often mix concerns:
* *Hardcoded providers:* adding vSphere requires rewriting the main loop.
* *No accounting:* users can provision unlimited clusters until the underlying storage fills up.
* *Tight coupling:* Helm values, RKE2 configs, and VM details are mashed into one huge struct.

### The RIG Solution
RIG segregates responsibilities into three distinct layers, acting as a **Gatekeeper** and **Router**.

#### 1. The Data Model (Blueprints)
* **`InfraBlueprint` (The Accountant):** owned by platform admins. Defines **quotas** (CPU/RAM/disk) and **credentials**, and points to the specific provider config. It automatically tracks usage across all child clusters.
* **`ClusterBlueprint` (The Request):** owned by users. Defines **what** is needed (e.g., "3 nodes, 4 CPU, 16 GB RAM") but not **how** it is provided (see the fixture sketch after this list).
* **`HarvesterBlueprint` / `VsphereBlueprint` (The Tech Specs):** hold the low-level details (image names, networks, VM namespaces).
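
For orientation, here is what a request looks like when built in Go, e.g. as a test fixture. This is a sketch using the types from `api/v1alpha1`; all field values are illustrative:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

// newDemoCluster returns a minimal ClusterBlueprint fixture.
// InfraBlueprintRef must name an existing InfraBlueprint.
func newDemoCluster() v1alpha1.ClusterBlueprint {
	return v1alpha1.ClusterBlueprint{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-cluster", Namespace: "default"},
		Spec: v1alpha1.ClusterBlueprintSpec{
			InfraBlueprintRef: "dev-infra",      // the accounting/provider layer
			KubernetesVersion: "v1.28.0+rke2r1", // target RKE2 version
			ControlPlaneHA:    false,            // one control-plane node
			WorkerPools: []v1alpha1.GenericPoolReq{
				{Name: "workers", Quantity: 3, CpuCores: 4, MemoryGB: 16, DiskGB: 40},
			},
		},
	}
}
```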

#### 2. The Logic Flow
1. **Gatekeeper:** before doing anything, the controller checks `Infra.Quota`. If `(Used + Request) > Max`, provisioning is **blocked** (see the sketch after this list).
2. **Router:** the controller reads `Infra.ProviderRef` and dynamically loads the correct **Strategy** (Harvester vs. vSphere).
3. **Builder:** a generic `MasterBuilder` combines the Strategy's output with a base Helm template to generate the final values.
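
The gatekeeper check itself is plain accounting over the quota fields defined in `api/v1alpha1`. A minimal sketch (not the controller's exact code; it assumes a zero limit means "unlimited"):

```go
package main

import "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"

// fitsQuota reports whether a request still fits inside the InfraBlueprint's
// quota. Zero limits are treated as unlimited -- an assumption of this sketch.
func fitsQuota(q v1alpha1.InfraQuota, used v1alpha1.InfraQuotaStatus, reqCPU, reqMemGB, reqDiskGB int) bool {
	if q.MaxCPU > 0 && used.UsedCPU+reqCPU > q.MaxCPU {
		return false // CPU quota exceeded: provisioning is blocked
	}
	if q.MaxMemoryGB > 0 && used.UsedMemoryGB+reqMemGB > q.MaxMemoryGB {
		return false // memory quota exceeded
	}
	if q.MaxDiskGB > 0 && used.UsedDiskGB+reqDiskGB > q.MaxDiskGB {
		return false // disk quota exceeded
	}
	return true
}
```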

---

## 📂 Project Structure

| Directory | Role | Description |
| :--- | :--- | :--- |
| `api/v1alpha1` | **The Contract** | Defines the CRDs (`ibp`, `cbp`, `hbp`). |
| `internal/controller` | **The Brain** | `ClusterBlueprint` (provisioning/gatekeeping) & `InfraBlueprint` (accounting). |
| `internal/provider` | **The Interface** | Defines the `Strategy` interface that all clouds must obey (see the sketch after this table). |
| `internal/provider/harvester` | **The Implementation** | Logic specific to Harvester (identity minting, NodePool mapping). |
| `internal/builder` | **The Assembler** | Merges Strategy output with Helm templates. Agnostic to the cloud provider. |
| `internal/helm` | **The Tool** | Wrapper around the Helm SDK (OCI supported). |
| `internal/templates` | **The Defaults** | Embedded YAML files containing default values (CPU/RAM, UserData). |
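
The `Strategy` contract is the seam that keeps the controller provider-agnostic. Its exact definition lives in `internal/provider`; a plausible shape (hypothetical signatures, shown for orientation only) is:

```go
package provider

import (
	"context"

	"vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

// Strategy is the per-cloud contract (hypothetical shape; see
// internal/provider for the real definition).
type Strategy interface {
	// Name identifies the provider, e.g. "harvester" or "vsphere".
	Name() string
	// BuildValues turns a generic ClusterBlueprint plus provider-specific
	// config into the Helm values consumed by the builder.
	BuildValues(ctx context.Context, cbp *v1alpha1.ClusterBlueprint) (map[string]any, error)
}
```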

---

## 🚀 Development Workflow

### Prerequisites
* Go 1.24+ (the Dockerfile and devcontainer use `golang:1.24`)
* Helm v3 (binary)
* A Kubernetes cluster (or local Kind/Minikube)
* `kubectl` pointing to your dev cluster

### 1. Common Commands

**Initial Setup (Download Dependencies):**
```bash
go mod tidy
```

**Update APIs (CRDs):**
*Run this whenever you edit `api/v1alpha1/*.go`.*

```bash
make manifests generate
```

**Run Locally:**
*Runs the controller against your current `~/.kube/config` context.*

```bash
make install run
```

### 2. Debugging (VS Code / Delve)

`make run` is great for logs, but if you need to set breakpoints (e.g., to inspect the Helm values map before it is applied), use the debugger.

**VS Code `launch.json` Configuration:**

```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Debug RIG Operator",
      "type": "go",
      "request": "launch",
      "mode": "auto",
      "program": "${workspaceFolder}/cmd/main.go",
      "args": [],
      "env": {
        "KUBECONFIG": "${userHome}/.kube/config"
      }
    }
  ]
}
```

1. Select **"Debug RIG Operator"** from the Run menu.
2. Set a breakpoint in `internal/controller/clusterblueprint_controller.go` (e.g., inside the `Reconcile` loop).
3. Apply a generic cluster manifest to trigger the breakpoint.

---

## 🛠 Maintenance Guide

### "I need to add a new field..."

| Scenario | Files to Touch | Command to Run |
| --- | --- | --- |
| **Add a field to the API** (e.g., `ProxyURL` to Infra) | `api/v1alpha1/infrablueprint_types.go` | `make manifests generate` |
| **Update default CPU/RAM** | `internal/templates/harvester/values.yaml` | `make build` (recompiles the embedded file) |
| **Change Harvester UserData logic** | `internal/provider/harvester/strategy.go` | `go test ./...` |
| **Add a new cloud provider (e.g. AWS)** | 1. Create `api/.../awsblueprint_types.go`<br>2. Create `internal/provider/aws/strategy.go`<br>3. Update the `controller` switch case (sketched after this table). | `make manifests generate` |
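
For the last row, the routing change is a single switch on `ProviderRef.Kind`. An illustrative sketch (constructor and kind names for AWS are hypothetical; the real switch lives in `internal/controller`):

```go
// newStrategy picks the provider implementation for an InfraBlueprint.
func newStrategy(infra *v1alpha1.InfraBlueprint) (provider.Strategy, error) {
	switch infra.Spec.ProviderRef.Kind {
	case "HarvesterBlueprint":
		return harvester.NewStrategy(), nil // hypothetical constructor
	case "VsphereBlueprint":
		return vsphere.NewStrategy(), nil // hypothetical constructor
	case "AWSBlueprint": // a new provider slots in here
		return aws.NewStrategy(), nil // hypothetical constructor
	default:
		return nil, fmt.Errorf("unsupported provider kind %q", infra.Spec.ProviderRef.Kind)
	}
}
```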

### "The Quota isn't updating!"

Remember that the **InfraController** is responsible for the math. It watches `ClusterBlueprint` events (see the wiring sketch below).

1. Check the logs: `kubectl logs -l control-plane=controller-manager`
2. Ensure your cluster actually points to the correct Infra name (`spec.infraBlueprintRef`).
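
If no events arrive at all, check the watch wiring. With controller-runtime (v0.15+), mapping `ClusterBlueprint` events back to their `InfraBlueprint` typically looks like this; a sketch, not the repo's exact code (imports: `sigs.k8s.io/controller-runtime` and its `pkg/handler`, `pkg/client`, `pkg/reconcile` packages, plus `k8s.io/apimachinery/pkg/types`):

```go
// SetupWithManager (sketch): re-queue the referenced InfraBlueprint whenever
// a ClusterBlueprint changes, so usage is recomputed.
func (r *InfraBlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.InfraBlueprint{}).
		Watches(&v1alpha1.ClusterBlueprint{},
			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request {
				cbp, ok := o.(*v1alpha1.ClusterBlueprint)
				if !ok || cbp.Spec.InfraBlueprintRef == "" {
					return nil
				}
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Namespace: cbp.Namespace,
					Name:      cbp.Spec.InfraBlueprintRef,
				}}}
			})).
		Complete(r)
}
```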

---

## 📊 Documentation & Diagrams

Visual flows (Mermaid) are available in the `docs/` folder:

* `docs/flow-diagram.svg`: high-level request flow.
* `docs/controllerflow.mermaid`: detailed controller logic.

deploy/rig-operator/README.md (new file, 135 lines)
@@ -0,0 +1,135 @@
# deploy
// TODO(user): Add simple overview of use/purpose

## Description
// TODO(user): An in-depth paragraph about your project and overview of use

## Getting Started

### Prerequisites
- go version v1.24.6+
- docker version 17.03+.
- kubectl version v1.11.3+.
- Access to a Kubernetes v1.11.3+ cluster.

### To Deploy on the cluster
**Build and push your image to the location specified by `IMG`:**

```sh
make docker-build docker-push IMG=<some-registry>/deploy:tag
```

**NOTE:** The image must be published to the registry you specified, and the
working environment must have access to pull it. Make sure you have the proper
permissions for the registry if the above commands don't work.

**Install the CRDs into the cluster:**

```sh
make install
```

**Deploy the Manager to the cluster with the image specified by `IMG`:**

```sh
make deploy IMG=<some-registry>/deploy:tag
```

> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
privileges or be logged in as admin.

**Create instances of your solution**
You can apply the samples (examples) from config/samples:

```sh
kubectl apply -k config/samples/
```

> **NOTE**: Ensure that the samples have default values to test them out.

### To Uninstall
**Delete the instances (CRs) from the cluster:**

```sh
kubectl delete -k config/samples/
```

**Delete the APIs (CRDs) from the cluster:**

```sh
make uninstall
```

**Undeploy the controller from the cluster:**

```sh
make undeploy
```

## Project Distribution

The following options are available to release and provide this solution to users.

### By providing a bundle with all YAML files

1. Build the installer for the image built and published in the registry:

```sh
make build-installer IMG=<some-registry>/deploy:tag
```

**NOTE:** The makefile target mentioned above generates an 'install.yaml'
file in the dist directory. This file contains all the resources built
with Kustomize, which are necessary to install this project without its
dependencies.

2. Using the installer

Users can just run 'kubectl apply -f <URL for YAML BUNDLE>' to install
the project, e.g.:

```sh
kubectl apply -f https://raw.githubusercontent.com/<org>/deploy/<tag or branch>/dist/install.yaml
```

### By providing a Helm Chart

1. Build the chart using the optional helm plugin

```sh
kubebuilder edit --plugins=helm/v2-alpha
```

2. See that a chart was generated under 'dist/chart', and users
can obtain this solution from there.

**NOTE:** If you change the project, you need to update the Helm Chart
using the same command above to sync the latest changes. Furthermore,
if you create webhooks, you need to use the above command with
the '--force' flag and manually ensure that any custom configuration
previously added to 'dist/chart/values.yaml' or 'dist/chart/manager/manager.yaml'
is re-applied afterwards.

## Contributing
// TODO(user): Add detailed information on how you would like others to contribute to this project

**NOTE:** Run `make help` for more information on all potential `make` targets

More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html)

## License

Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

deploy/rig-operator/api/v1alpha1/clusterblueprint_types.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GenericPoolReq defines a request for a set of nodes with specific sizing.
// This is provider-agnostic.
type GenericPoolReq struct {
	// Name is the identifier for this node pool (e.g. "workers-gpu").
	// +required
	Name string `json:"name"`

	// Quantity is the number of nodes desired.
	// +required
	// +kubebuilder:validation:Minimum=0
	Quantity int `json:"quantity"`

	// CpuCores is the number of vCPUs per node.
	// +required
	// +kubebuilder:validation:Minimum=1
	CpuCores int `json:"cpuCores"`

	// MemoryGB is the amount of RAM per node in gigabytes.
	// +required
	// +kubebuilder:validation:Minimum=1
	MemoryGB int `json:"memoryGb"`

	// DiskGB is the root disk size per node in gigabytes.
	// +required
	// +kubebuilder:validation:Minimum=10
	DiskGB int `json:"diskGb"`
}

// ClusterBlueprintSpec defines the desired state of ClusterBlueprint
type ClusterBlueprintSpec struct {
	// InfraBlueprintRef points to the InfraBlueprint (IBP) that manages
	// the quotas and provider details for this cluster.
	// +required
	InfraBlueprintRef string `json:"infraBlueprintRef"`

	// KubernetesVersion is the target RKE2/K3s version (e.g., v1.28.0+rke2r1).
	// +required
	KubernetesVersion string `json:"kubernetesVersion"`

	// ControlPlaneHA determines if we provision 3 CP nodes (true) or 1 (false).
	// +optional
	ControlPlaneHA bool `json:"controlPlaneHA"`

	// WorkerPools is the list of worker node groups to provision.
	// +optional
	WorkerPools []GenericPoolReq `json:"workerPools,omitempty"`
}

// IdentityStatus tracks the generated cloud provider identity
type IdentityStatus struct {
	// SecretRef is the name of the generated secret used by this cluster.
	SecretRef string `json:"secretRef,omitempty"`

	// ServiceAccount is the name of the SA created on the provider (if applicable).
	ServiceAccount string `json:"serviceAccount,omitempty"`
}

// ClusterBlueprintStatus defines the observed state of ClusterBlueprint
type ClusterBlueprintStatus struct {
	// Ready indicates if the Helm chart has been successfully applied.
	Ready bool `json:"ready"`

	// Identity tracks the cloud credentials generated for this cluster.
	// +optional
	Identity *IdentityStatus `json:"identity,omitempty"`

	// Phase can be "Pending", "Provisioning", "Deployed", or "Failed".
	// +optional
	Phase string `json:"phase,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=cbp
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
// +kubebuilder:printcolumn:name="K8s Version",type="string",JSONPath=".spec.kubernetesVersion"
// +kubebuilder:printcolumn:name="Infra",type="string",JSONPath=".spec.infraBlueprintRef"

// ClusterBlueprint is the Schema for the clusterblueprints API
type ClusterBlueprint struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ClusterBlueprintSpec   `json:"spec,omitempty"`
	Status ClusterBlueprintStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// ClusterBlueprintList contains a list of ClusterBlueprint
type ClusterBlueprintList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ClusterBlueprint `json:"items"`
}

func init() {
	SchemeBuilder.Register(&ClusterBlueprint{}, &ClusterBlueprintList{})
}

deploy/rig-operator/api/v1alpha1/groupversion_info.go (new file, 36 lines)
@@ -0,0 +1,36 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha1 contains API Schema definitions for the rig v1alpha1 API group.
// +kubebuilder:object:generate=true
// +groupName=rig.appstack.io
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "rig.appstack.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

deploy/rig-operator/api/v1alpha1/harvesterblueprint_types.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// HarvesterBlueprintSpec defines the desired state of HarvesterBlueprint
type HarvesterBlueprintSpec struct {
	// HarvesterURL is the endpoint of the Harvester cluster (e.g. https://10.x.x.x:6443).
	// This replaces the need for auto-discovery.
	// +required
	HarvesterURL string `json:"harvesterUrl"`

	// VmNamespace is the namespace in Harvester where VMs will be created.
	// +required
	VmNamespace string `json:"vmNamespace"`

	// ImageName is the specific image name in Harvester to clone (e.g. image-abcde).
	// +required
	ImageName string `json:"imageName"`

	// NetworkName is the VM network to attach to the nodes.
	// +required
	NetworkName string `json:"networkName"`

	// SshUser is the username to configure on the VM (e.g. ubuntu, rancher).
	// +required
	SshUser string `json:"sshUser"`
}

// HarvesterBlueprintStatus defines the observed state of HarvesterBlueprint
type HarvesterBlueprintStatus struct {
	// Ready indicates the configuration is valid (optional future use)
	Ready bool `json:"ready,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=hbp

// HarvesterBlueprint is the Schema for the harvesterblueprints API
type HarvesterBlueprint struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   HarvesterBlueprintSpec   `json:"spec,omitempty"`
	Status HarvesterBlueprintStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// HarvesterBlueprintList contains a list of HarvesterBlueprint
type HarvesterBlueprintList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []HarvesterBlueprint `json:"items"`
}

func init() {
	SchemeBuilder.Register(&HarvesterBlueprint{}, &HarvesterBlueprintList{})
}

deploy/rig-operator/api/v1alpha1/infrablueprint_types.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// InfraQuota defines the resource limits for this infrastructure account
type InfraQuota struct {
	// MaxCPU is the total number of cores allowed across all clusters
	// +optional
	MaxCPU int `json:"maxCpu,omitempty"`

	// MaxMemoryGB is the total RAM (in GB) allowed across all clusters
	// +optional
	MaxMemoryGB int `json:"maxMemoryGb,omitempty"`

	// MaxDiskGB is the total storage (in GB) allowed across all clusters
	// +optional
	MaxDiskGB int `json:"maxDiskGb,omitempty"`
}

// InfraQuotaStatus tracks current usage
type InfraQuotaStatus struct {
	// UsedCPU is the sum of cores currently provisioned
	UsedCPU int `json:"usedCpu"`

	// UsedMemoryGB is the sum of RAM currently provisioned
	UsedMemoryGB int `json:"usedMemoryGb"`

	// UsedDiskGB tracks storage consumption
	UsedDiskGB int `json:"usedDiskGb"`
}

// ProviderRef points to the specific provider configuration (HBP or VBP)
type ProviderRef struct {
	// Kind is the type of resource being referenced (e.g., HarvesterBlueprint)
	// +required
	Kind string `json:"kind"`

	// Name is the name of the resource being referenced
	// +required
	Name string `json:"name"`

	// APIGroup defaults to rig.appstack.io if not specified
	// +optional
	APIGroup string `json:"apiGroup,omitempty"`
}

// InfraBlueprintSpec defines the desired state of InfraBlueprint
type InfraBlueprintSpec struct {
	// RancherURL is the public URL of the Rancher Manager (e.g. https://rancher.example.com).
	// This is injected into the Helm chart to register the cluster.
	// +required
	RancherURL string `json:"rancherUrl"`

	// CloudCredentialSecret is the name of the Secret containing the
	// master cloud credentials (e.g., kubeconfig or username/password).
	// +required
	CloudCredentialSecret string `json:"cloudCredentialSecret"`

	// ProviderRef points to the technical configuration (HarvesterBlueprint/VsphereBlueprint).
	// +required
	ProviderRef ProviderRef `json:"providerRef"`

	// Quota defines the maximum resources allocatable by this Infra.
	// +optional
	Quota InfraQuota `json:"quota,omitempty"`

	// UserData is the default cloud-init user data for all clusters in this Infra.
	// +optional
	UserData string `json:"userData,omitempty"`
}

// InfraBlueprintStatus defines the observed state of InfraBlueprint
type InfraBlueprintStatus struct {
	// Ready indicates the provider connection is verified
	Ready bool `json:"ready,omitempty"`

	// Usage tracks the current resource consumption
	Usage InfraQuotaStatus `json:"usage,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=ibp
// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready"
// +kubebuilder:printcolumn:name="MaxCPU",type="integer",JSONPath=".spec.quota.maxCpu"
// +kubebuilder:printcolumn:name="UsedCPU",type="integer",JSONPath=".status.usage.usedCpu"
// +kubebuilder:printcolumn:name="MaxMem(GB)",type="integer",JSONPath=".spec.quota.maxMemoryGb"
// +kubebuilder:printcolumn:name="UsedMem(GB)",type="integer",JSONPath=".status.usage.usedMemoryGb"
// +kubebuilder:printcolumn:name="MaxDisk(GB)",type="integer",JSONPath=".spec.quota.maxDiskGb"
// +kubebuilder:printcolumn:name="UsedDisk(GB)",type="integer",JSONPath=".status.usage.usedDiskGb"

// InfraBlueprint is the Schema for the infrablueprints API
type InfraBlueprint struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   InfraBlueprintSpec   `json:"spec,omitempty"`
	Status InfraBlueprintStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// InfraBlueprintList contains a list of InfraBlueprint
type InfraBlueprintList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []InfraBlueprint `json:"items"`
}

func init() {
	SchemeBuilder.Register(&InfraBlueprint{}, &InfraBlueprintList{})
}

deploy/rig-operator/api/v1alpha1/vsphereblueprint_types.go (new file, 67 lines)
@@ -0,0 +1,67 @@
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// VsphereBlueprintSpec defines the desired state of VsphereBlueprint
type VsphereBlueprintSpec struct {
	// vCenter address (e.g. vcenter.example.com)
	// +required
	VCenter string `json:"vCenter"`

	// Datacenter name (e.g. NL001)
	// +required
	Datacenter string `json:"datacenter"`

	// Folder path where VMs will be organized (e.g. "ICT Digitalisation - Rancher")
	// +required
	Folder string `json:"folder"`

	// ResourcePool path (e.g. "NL001 Development - Rancher/Resources")
	// +required
	ResourcePool string `json:"resourcePool"`

	// DatastoreCluster or Datastore name (e.g. "NL001 Development - Rancher SDRS")
	// +required
	Datastore string `json:"datastore"`

	// Network name to attach to (e.g. "nl001.vDS.Distri.Vlan.1542")
	// +required
	Network string `json:"network"`

	// Template is the VM template name to clone from
	// +required
	Template string `json:"template"`
}

// VsphereBlueprintStatus defines the observed state
type VsphereBlueprintStatus struct {
	Ready bool `json:"ready,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=vbp

// VsphereBlueprint is the Schema for the vsphereblueprints API
type VsphereBlueprint struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   VsphereBlueprintSpec   `json:"spec,omitempty"`
	Status VsphereBlueprintStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// VsphereBlueprintList contains a list of VsphereBlueprint
type VsphereBlueprintList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []VsphereBlueprint `json:"items"`
}

func init() {
	SchemeBuilder.Register(&VsphereBlueprint{}, &VsphereBlueprintList{})
}

deploy/rig-operator/api/v1alpha1/zz_generated.deepcopy.go (new file, 469 lines; listing truncated below)
@@ -0,0 +1,469 @@
//go:build !ignore_autogenerated

/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterBlueprint) DeepCopyInto(out *ClusterBlueprint) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBlueprint.
func (in *ClusterBlueprint) DeepCopy() *ClusterBlueprint {
	if in == nil {
		return nil
	}
	out := new(ClusterBlueprint)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterBlueprint) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterBlueprintList) DeepCopyInto(out *ClusterBlueprintList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ClusterBlueprint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBlueprintList.
func (in *ClusterBlueprintList) DeepCopy() *ClusterBlueprintList {
	if in == nil {
		return nil
	}
	out := new(ClusterBlueprintList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterBlueprintList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterBlueprintSpec) DeepCopyInto(out *ClusterBlueprintSpec) {
	*out = *in
	if in.WorkerPools != nil {
		in, out := &in.WorkerPools, &out.WorkerPools
		*out = make([]GenericPoolReq, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBlueprintSpec.
func (in *ClusterBlueprintSpec) DeepCopy() *ClusterBlueprintSpec {
	if in == nil {
		return nil
	}
	out := new(ClusterBlueprintSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterBlueprintStatus) DeepCopyInto(out *ClusterBlueprintStatus) {
	*out = *in
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(IdentityStatus)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBlueprintStatus.
func (in *ClusterBlueprintStatus) DeepCopy() *ClusterBlueprintStatus {
	if in == nil {
		return nil
	}
	out := new(ClusterBlueprintStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenericPoolReq) DeepCopyInto(out *GenericPoolReq) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericPoolReq.
func (in *GenericPoolReq) DeepCopy() *GenericPoolReq {
	if in == nil {
		return nil
	}
	out := new(GenericPoolReq)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HarvesterBlueprint) DeepCopyInto(out *HarvesterBlueprint) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterBlueprint.
func (in *HarvesterBlueprint) DeepCopy() *HarvesterBlueprint {
	if in == nil {
		return nil
	}
	out := new(HarvesterBlueprint)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HarvesterBlueprint) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HarvesterBlueprintList) DeepCopyInto(out *HarvesterBlueprintList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]HarvesterBlueprint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterBlueprintList.
func (in *HarvesterBlueprintList) DeepCopy() *HarvesterBlueprintList {
	if in == nil {
		return nil
	}
	out := new(HarvesterBlueprintList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HarvesterBlueprintList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HarvesterBlueprintSpec) DeepCopyInto(out *HarvesterBlueprintSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterBlueprintSpec.
func (in *HarvesterBlueprintSpec) DeepCopy() *HarvesterBlueprintSpec {
	if in == nil {
		return nil
	}
	out := new(HarvesterBlueprintSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HarvesterBlueprintStatus) DeepCopyInto(out *HarvesterBlueprintStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HarvesterBlueprintStatus.
func (in *HarvesterBlueprintStatus) DeepCopy() *HarvesterBlueprintStatus {
	if in == nil {
		return nil
	}
	out := new(HarvesterBlueprintStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityStatus) DeepCopyInto(out *IdentityStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityStatus.
func (in *IdentityStatus) DeepCopy() *IdentityStatus {
	if in == nil {
		return nil
	}
	out := new(IdentityStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InfraBlueprint) DeepCopyInto(out *InfraBlueprint) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraBlueprint.
func (in *InfraBlueprint) DeepCopy() *InfraBlueprint {
	if in == nil {
		return nil
	}
	out := new(InfraBlueprint)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *InfraBlueprint) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InfraBlueprintList) DeepCopyInto(out *InfraBlueprintList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]InfraBlueprint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraBlueprintList.
func (in *InfraBlueprintList) DeepCopy() *InfraBlueprintList {
	if in == nil {
		return nil
	}
	out := new(InfraBlueprintList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *InfraBlueprintList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InfraBlueprintSpec) DeepCopyInto(out *InfraBlueprintSpec) {
	*out = *in
	out.ProviderRef = in.ProviderRef
	out.Quota = in.Quota
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraBlueprintSpec.
func (in *InfraBlueprintSpec) DeepCopy() *InfraBlueprintSpec {
	if in == nil {
		return nil
	}
	out := new(InfraBlueprintSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InfraBlueprintStatus) DeepCopyInto(out *InfraBlueprintStatus) {
	*out = *in
	out.Usage = in.Usage
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraBlueprintStatus.
|
||||
func (in *InfraBlueprintStatus) DeepCopy() *InfraBlueprintStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(InfraBlueprintStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *InfraQuota) DeepCopyInto(out *InfraQuota) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraQuota.
|
||||
func (in *InfraQuota) DeepCopy() *InfraQuota {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(InfraQuota)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *InfraQuotaStatus) DeepCopyInto(out *InfraQuotaStatus) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraQuotaStatus.
|
||||
func (in *InfraQuotaStatus) DeepCopy() *InfraQuotaStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(InfraQuotaStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ProviderRef) DeepCopyInto(out *ProviderRef) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderRef.
|
||||
func (in *ProviderRef) DeepCopy() *ProviderRef {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ProviderRef)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VsphereBlueprint) DeepCopyInto(out *VsphereBlueprint) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
out.Status = in.Status
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereBlueprint.
|
||||
func (in *VsphereBlueprint) DeepCopy() *VsphereBlueprint {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VsphereBlueprint)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *VsphereBlueprint) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VsphereBlueprintList) DeepCopyInto(out *VsphereBlueprintList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]VsphereBlueprint, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereBlueprintList.
|
||||
func (in *VsphereBlueprintList) DeepCopy() *VsphereBlueprintList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VsphereBlueprintList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *VsphereBlueprintList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VsphereBlueprintSpec) DeepCopyInto(out *VsphereBlueprintSpec) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereBlueprintSpec.
|
||||
func (in *VsphereBlueprintSpec) DeepCopy() *VsphereBlueprintSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VsphereBlueprintSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VsphereBlueprintStatus) DeepCopyInto(out *VsphereBlueprintStatus) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereBlueprintStatus.
|
||||
func (in *VsphereBlueprintStatus) DeepCopy() *VsphereBlueprintStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VsphereBlueprintStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
213
deploy/rig-operator/cmd/main.go
Normal file
@@ -0,0 +1,213 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "crypto/tls"
    "flag"
    "os"

    // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
    // to ensure that exec-entrypoint and run can make use of them.
    _ "k8s.io/client-go/plugin/pkg/client/auth"

    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/healthz"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"
    "sigs.k8s.io/controller-runtime/pkg/metrics/filters"
    metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
    "sigs.k8s.io/controller-runtime/pkg/webhook"

    rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
    "vanderlande.com/ittp/appstack/rig-operator/internal/controller"
    // +kubebuilder:scaffold:imports
)

var (
    scheme   = runtime.NewScheme()
    setupLog = ctrl.Log.WithName("setup")
)

func init() {
    utilruntime.Must(clientgoscheme.AddToScheme(scheme))

    utilruntime.Must(rigv1alpha1.AddToScheme(scheme))
    // +kubebuilder:scaffold:scheme
}

// nolint:gocyclo
func main() {
    var metricsAddr string
    var metricsCertPath, metricsCertName, metricsCertKey string
    var webhookCertPath, webhookCertName, webhookCertKey string
    var enableLeaderElection bool
    var probeAddr string
    var secureMetrics bool
    var enableHTTP2 bool
    var tlsOpts []func(*tls.Config)
    flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
        "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
    flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
    flag.BoolVar(&enableLeaderElection, "leader-elect", false,
        "Enable leader election for controller manager. "+
            "Enabling this will ensure there is only one active controller manager.")
    flag.BoolVar(&secureMetrics, "metrics-secure", true,
        "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
    flag.StringVar(&webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.")
    flag.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.")
    flag.StringVar(&webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.")
    flag.StringVar(&metricsCertPath, "metrics-cert-path", "",
        "The directory that contains the metrics server certificate.")
    flag.StringVar(&metricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.")
    flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.")
    flag.BoolVar(&enableHTTP2, "enable-http2", false,
        "If set, HTTP/2 will be enabled for the metrics and webhook servers.")
    opts := zap.Options{
        Development: true,
    }
    opts.BindFlags(flag.CommandLine)
    flag.Parse()

    ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

    // If the enable-http2 flag is false (the default), http/2 should be disabled
    // due to its vulnerabilities. More specifically, disabling http/2 will
    // prevent us from being vulnerable to the HTTP/2 Stream Cancellation and
    // Rapid Reset CVEs. For more information see:
    // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
    // - https://github.com/advisories/GHSA-4374-p667-p6c8
    disableHTTP2 := func(c *tls.Config) {
        setupLog.Info("disabling http/2")
        c.NextProtos = []string{"http/1.1"}
    }

    if !enableHTTP2 {
        tlsOpts = append(tlsOpts, disableHTTP2)
    }

    // Initial webhook TLS options
    webhookTLSOpts := tlsOpts
    webhookServerOptions := webhook.Options{
        TLSOpts: webhookTLSOpts,
    }

    if len(webhookCertPath) > 0 {
        setupLog.Info("Initializing webhook certificate watcher using provided certificates",
            "webhook-cert-path", webhookCertPath, "webhook-cert-name", webhookCertName, "webhook-cert-key", webhookCertKey)

        webhookServerOptions.CertDir = webhookCertPath
        webhookServerOptions.CertName = webhookCertName
        webhookServerOptions.KeyName = webhookCertKey
    }

    webhookServer := webhook.NewServer(webhookServerOptions)

    // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
    // More info:
    // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.4/pkg/metrics/server
    // - https://book.kubebuilder.io/reference/metrics.html
    metricsServerOptions := metricsserver.Options{
        BindAddress:   metricsAddr,
        SecureServing: secureMetrics,
        TLSOpts:       tlsOpts,
    }

    if secureMetrics {
        // FilterProvider is used to protect the metrics endpoint with authn/authz.
        // These configurations ensure that only authorized users and service accounts
        // can access the metrics endpoint. The RBAC permissions are configured in 'config/rbac/kustomization.yaml'. More info:
        // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.4/pkg/metrics/filters#WithAuthenticationAndAuthorization
        metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
    }

    // If the certificate is not specified, controller-runtime will automatically
    // generate self-signed certificates for the metrics server. While convenient for development and testing,
    // this setup is not recommended for production.
    //
    // TODO(user): If you enable certManager, uncomment the following lines:
    // - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates
    // managed by cert-manager for the metrics server.
    // - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification.
    if len(metricsCertPath) > 0 {
        setupLog.Info("Initializing metrics certificate watcher using provided certificates",
            "metrics-cert-path", metricsCertPath, "metrics-cert-name", metricsCertName, "metrics-cert-key", metricsCertKey)

        metricsServerOptions.CertDir = metricsCertPath
        metricsServerOptions.CertName = metricsCertName
        metricsServerOptions.KeyName = metricsCertKey
    }

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
        Scheme:                 scheme,
        Metrics:                metricsServerOptions,
        WebhookServer:          webhookServer,
        HealthProbeBindAddress: probeAddr,
        LeaderElection:         enableLeaderElection,
        LeaderElectionID:       "47b7cef0.appstack.io",
        // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
        // when the Manager ends. This requires the binary to end immediately when the
        // Manager is stopped; otherwise this setting is unsafe. Setting this significantly
        // speeds up voluntary leader transitions, as the new leader doesn't have to wait
        // the LeaseDuration time first.
        //
        // In the default scaffold provided, the program ends immediately after
        // the manager stops, so it would be fine to enable this option. However,
        // if you are doing, or intend to do, any operation such as performing cleanups
        // after the manager stops, then its usage might be unsafe.
        // LeaderElectionReleaseOnCancel: true,
    })
    if err != nil {
        setupLog.Error(err, "unable to start manager")
        os.Exit(1)
    }

    if err := (&controller.ClusterBlueprintReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
        // [IMPORTANT] Add this line to enable event broadcasting
        Recorder: mgr.GetEventRecorderFor("rig-operator"),
    }).SetupWithManager(mgr); err != nil {
        setupLog.Error(err, "unable to create controller", "controller", "ClusterBlueprint")
        os.Exit(1)
    }
    if err := (&controller.InfraBlueprintReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
    }).SetupWithManager(mgr); err != nil {
        setupLog.Error(err, "unable to create controller", "controller", "InfraBlueprint")
        os.Exit(1)
    }
    // +kubebuilder:scaffold:builder

    if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
        setupLog.Error(err, "unable to set up health check")
        os.Exit(1)
    }
    if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
        setupLog.Error(err, "unable to set up ready check")
        os.Exit(1)
    }

    setupLog.Info("starting manager")
    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        setupLog.Error(err, "problem running manager")
        os.Exit(1)
    }
}
@@ -0,0 +1,136 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.19.0
  name: clusterblueprints.rig.appstack.io
spec:
  group: rig.appstack.io
  names:
    kind: ClusterBlueprint
    listKind: ClusterBlueprintList
    plural: clusterblueprints
    shortNames:
    - cbp
    singular: clusterblueprint
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - jsonPath: .status.phase
      name: Phase
      type: string
    - jsonPath: .spec.kubernetesVersion
      name: K8s Version
      type: string
    - jsonPath: .spec.infraBlueprintRef
      name: Infra
      type: string
    name: v1alpha1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: ClusterBlueprintSpec defines the desired state of ClusterBlueprint
            properties:
              controlPlaneHA:
                description: ControlPlaneHA determines if we provision 3 CP nodes
                  (true) or 1 (false).
                type: boolean
              infraBlueprintRef:
                description: |-
                  InfraBlueprintRef points to the InfraBlueprint (IBP) that manages
                  the quotas and provider details for this cluster.
                type: string
              kubernetesVersion:
                description: KubernetesVersion is the target RKE2/K3s version (e.g.,
                  v1.28.0+rke2r1).
                type: string
              workerPools:
                description: WorkerPools is the list of worker node groups to provision.
                items:
                  description: |-
                    GenericPoolReq defines a request for a set of nodes with specific sizing.
                    This is provider-agnostic.
                  properties:
                    cpuCores:
                      description: CpuCores is the number of vCPUs per node.
                      minimum: 1
                      type: integer
                    diskGb:
                      description: DiskGB is the root disk size per node in Gigabytes.
                      minimum: 10
                      type: integer
                    memoryGb:
                      description: MemoryGB is the amount of RAM per node in Gigabytes.
                      minimum: 1
                      type: integer
                    name:
                      description: Name is the identifier for this node pool (e.g.
                        "workers-gpu").
                      type: string
                    quantity:
                      description: Quantity is the number of nodes desired.
                      minimum: 0
                      type: integer
                  required:
                  - cpuCores
                  - diskGb
                  - memoryGb
                  - name
                  - quantity
                  type: object
                type: array
            required:
            - infraBlueprintRef
            - kubernetesVersion
            type: object
          status:
            description: ClusterBlueprintStatus defines the observed state of ClusterBlueprint
            properties:
              identity:
                description: Identity tracks the cloud credentials generated for this
                  cluster.
                properties:
                  secretRef:
                    description: SecretRef is the name of the generated secret used
                      by this cluster.
                    type: string
                  serviceAccount:
                    description: ServiceAccount is the name of the SA created on the
                      provider (if applicable).
                    type: string
                type: object
              phase:
                description: Phase can be "Pending", "Provisioning", "Deployed", or
                  "Failed"
                type: string
              ready:
                description: Ready indicates if the Helm Chart has been successfully
                  applied.
                type: boolean
            required:
            - ready
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
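
To make the schema concrete, here is a minimal sketch of a ClusterBlueprint manifest this CRD would admit. All names and sizing values are hypothetical; the required fields (infraBlueprintRef, kubernetesVersion, and the five per-pool fields) follow directly from the schema above.

apiVersion: rig.appstack.io/v1alpha1
kind: ClusterBlueprint
metadata:
  name: demo-cluster                    # hypothetical example name
  namespace: default
spec:
  kubernetesVersion: v1.28.0+rke2r1     # target RKE2/K3s version
  infraBlueprintRef: demo-infra         # must name an existing InfraBlueprint
  controlPlaneHA: true                  # 3 control-plane nodes instead of 1
  workerPools:
  - name: workers-general
    quantity: 3                         # number of nodes (minimum 0)
    cpuCores: 4                         # vCPUs per node (minimum 1)
    memoryGb: 8                         # RAM per node in GB (minimum 1)
    diskGb: 40                          # root disk per node in GB (minimum 10)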
@@ -0,0 +1,82 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.19.0
  name: harvesterblueprints.rig.appstack.io
spec:
  group: rig.appstack.io
  names:
    kind: HarvesterBlueprint
    listKind: HarvesterBlueprintList
    plural: harvesterblueprints
    shortNames:
    - hbp
    singular: harvesterblueprint
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: HarvesterBlueprintSpec defines the desired state of HarvesterBlueprint
            properties:
              harvesterUrl:
                description: |-
                  HarvesterURL is the endpoint of the Harvester cluster (e.g. https://10.x.x.x:6443).
                  This replaces the need for auto-discovery.
                type: string
              imageName:
                description: ImageName is the specific image name in Harvester to
                  clone (e.g. image-abcde).
                type: string
              networkName:
                description: NetworkName is the VM Network to attach to the nodes.
                type: string
              sshUser:
                description: SshUser is the username to configure on the VM (e.g.
                  ubuntu, rancher).
                type: string
              vmNamespace:
                description: VmNamespace is the namespace in Harvester where VMs will
                  be created.
                type: string
            required:
            - harvesterUrl
            - imageName
            - networkName
            - sshUser
            - vmNamespace
            type: object
          status:
            description: HarvesterBlueprintStatus defines the observed state of HarvesterBlueprint
            properties:
              ready:
                description: Ready indicates the configuration is valid (optional
                  future use)
                type: boolean
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
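
A minimal HarvesterBlueprint under this schema might look as follows; every value is a placeholder (the URL shape and example names are taken from the field descriptions above).

apiVersion: rig.appstack.io/v1alpha1
kind: HarvesterBlueprint
metadata:
  name: demo-harvester                    # hypothetical name
  namespace: default
spec:
  harvesterUrl: https://10.0.0.10:6443    # Harvester endpoint; replaces auto-discovery
  imageName: image-abcde                  # image in Harvester to clone
  networkName: vm-network                 # VM network attached to the nodes (placeholder)
  sshUser: ubuntu                         # user configured on the VMs
  vmNamespace: default                    # Harvester namespace that receives the VMs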
@@ -0,0 +1,146 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.19.0
  name: infrablueprints.rig.appstack.io
spec:
  group: rig.appstack.io
  names:
    kind: InfraBlueprint
    listKind: InfraBlueprintList
    plural: infrablueprints
    shortNames:
    - ibp
    singular: infrablueprint
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - jsonPath: .status.ready
      name: Ready
      type: boolean
    - jsonPath: .spec.quota.maxCpu
      name: MaxCPU
      type: integer
    - jsonPath: .status.usage.usedCpu
      name: UsedCPU
      type: integer
    - jsonPath: .spec.quota.maxMemoryGb
      name: MaxMem(GB)
      type: integer
    - jsonPath: .status.usage.usedMemoryGb
      name: UsedMem(GB)
      type: integer
    - jsonPath: .spec.quota.maxDiskGb
      name: MaxDisk(GB)
      type: integer
    - jsonPath: .status.usage.usedDiskGb
      name: UsedDisk(GB)
      type: integer
    name: v1alpha1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: InfraBlueprintSpec defines the desired state of InfraBlueprint
            properties:
              cloudCredentialSecret:
                description: |-
                  CloudCredentialSecret is the name of the Secret containing the
                  master cloud credentials (e.g., kubeconfig or username/password).
                type: string
              providerRef:
                description: ProviderRef points to the technical configuration (HarvesterBlueprint/VsphereBlueprint).
                properties:
                  apiGroup:
                    description: APIGroup defaults to rig.appstack.io if not specified
                    type: string
                  kind:
                    description: Kind is the type of resource being referenced (e.g.,
                      HarvesterBlueprint)
                    type: string
                  name:
                    description: Name is the name of resource being referenced
                    type: string
                required:
                - kind
                - name
                type: object
              quota:
                description: Quota defines the maximum resources allocatable by this
                  Infra.
                properties:
                  maxCpu:
                    description: MaxCPU is the total number of cores allowed across
                      all clusters
                    type: integer
                  maxDiskGb:
                    description: MaxDiskGB is the total Storage (in GB) allowed across
                      all clusters
                    type: integer
                  maxMemoryGb:
                    description: MaxMemoryGB is the total RAM (in GB) allowed across
                      all clusters
                    type: integer
                type: object
              rancherUrl:
                description: |-
                  RancherURL is the public URL of the Rancher Manager (e.g. https://rancher.example.com)
                  This is injected into the Helm Chart to register the cluster.
                type: string
              userData:
                description: UserData is the default cloud-init user data for all
                  clusters in this Infra.
                type: string
            required:
            - cloudCredentialSecret
            - providerRef
            - rancherUrl
            type: object
          status:
            description: InfraBlueprintStatus defines the observed state of InfraBlueprint
            properties:
              ready:
                description: Ready indicates the provider connection is verified
                type: boolean
              usage:
                description: Usage tracks the current resource consumption
                properties:
                  usedCpu:
                    description: UsedCPU is the sum of cores currently provisioned
                    type: integer
                  usedDiskGb:
                    description: UsedDiskGB tracks storage consumption
                    type: integer
                  usedMemoryGb:
                    description: UsedMemoryGB is the sum of RAM currently provisioned
                    type: integer
                required:
                - usedCpu
                - usedDiskGb
                - usedMemoryGb
                type: object
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
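
The printer columns above pair each quota ceiling with its usage counter, so `kubectl get ibp` shows consumption at a glance. As a sketch (all names and numbers hypothetical): a ClusterBlueprint requesting a pool of 3 nodes x 4 vCPUs would add 12 to usedCpu, which the controller can then compare against maxCpu before provisioning.

apiVersion: rig.appstack.io/v1alpha1
kind: InfraBlueprint
metadata:
  name: demo-infra                      # hypothetical; referenced by ClusterBlueprint.spec.infraBlueprintRef
  namespace: default
spec:
  cloudCredentialSecret: harvester-kubeconfig   # hypothetical Secret holding master credentials
  rancherUrl: https://rancher.example.com       # injected into the Helm Chart
  providerRef:
    kind: HarvesterBlueprint            # or VsphereBlueprint
    name: demo-harvester                # apiGroup defaults to rig.appstack.io
  quota:
    maxCpu: 64                          # 3 nodes x 4 vCPUs would consume 12 of these 64 cores
    maxMemoryGb: 256
    maxDiskGb: 2048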
@@ -0,0 +1,86 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.19.0
  name: vsphereblueprints.rig.appstack.io
spec:
  group: rig.appstack.io
  names:
    kind: VsphereBlueprint
    listKind: VsphereBlueprintList
    plural: vsphereblueprints
    shortNames:
    - vbp
    singular: vsphereblueprint
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: VsphereBlueprint is the Schema for the vsphereblueprints API
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: VsphereBlueprintSpec defines the desired state of VsphereBlueprint
            properties:
              datacenter:
                description: Datacenter name (e.g. NL001)
                type: string
              datastore:
                description: DatastoreCluster or Datastore name (e.g. "NL001 Development
                  - Rancher SDRS")
                type: string
              folder:
                description: Folder path where VMs will be organized (e.g. "ICT Digitalisation
                  - Rancher")
                type: string
              network:
                description: Network name to attach to (e.g. "nl001.vDS.Distri.Vlan.1542")
                type: string
              resourcePool:
                description: ResourcePool path (e.g. "NL001 Development - Rancher/Resources")
                type: string
              template:
                description: Template is the VM template name to clone from
                type: string
              vCenter:
                description: vCenter address (e.g. vcenter.example.com)
                type: string
            required:
            - datacenter
            - datastore
            - folder
            - network
            - resourcePool
            - template
            - vCenter
            type: object
          status:
            description: VsphereBlueprintStatus defines the observed state
            properties:
              ready:
                type: boolean
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
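
A matching VsphereBlueprint sketch, reusing the example values from the field descriptions above (the template name is hypothetical):

apiVersion: rig.appstack.io/v1alpha1
kind: VsphereBlueprint
metadata:
  name: demo-vsphere                    # hypothetical name
  namespace: default
spec:
  vCenter: vcenter.example.com
  datacenter: NL001
  datastore: "NL001 Development - Rancher SDRS"
  folder: "ICT Digitalisation - Rancher"
  network: "nl001.vDS.Distri.Vlan.1542"
  resourcePool: "NL001 Development - Rancher/Resources"
  template: ubuntu-2204-template        # hypothetical VM template to clone from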
19
deploy/rig-operator/config/crd/kustomization.yaml
Normal file
@@ -0,0 +1,19 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/rig.appstack.io_clusterblueprints.yaml
- bases/rig.appstack.io_infrablueprints.yaml
- bases/rig.appstack.io_harvesterblueprints.yaml
- bases/rig.appstack.io_vsphereblueprints.yaml
# +kubebuilder:scaffold:crdkustomizeresource

patches:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
# +kubebuilder:scaffold:crdkustomizewebhookpatch

# [WEBHOOK] To enable webhook, uncomment the following section
# the following config is for teaching kustomize how to do kustomization for CRDs.
#configurations:
#- kustomizeconfig.yaml
19
deploy/rig-operator/config/crd/kustomizeconfig.yaml
Normal file
@@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
  version: v1
  fieldSpecs:
  - kind: CustomResourceDefinition
    version: v1
    group: apiextensions.k8s.io
    path: spec/conversion/webhook/clientConfig/service/name

namespace:
- kind: CustomResourceDefinition
  version: v1
  group: apiextensions.k8s.io
  path: spec/conversion/webhook/clientConfig/service/namespace
  create: false

varReference:
- path: metadata/annotations
@@ -0,0 +1,30 @@
# This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs.

# Add the volumeMount for the metrics-server certs
- op: add
  path: /spec/template/spec/containers/0/volumeMounts/-
  value:
    mountPath: /tmp/k8s-metrics-server/metrics-certs
    name: metrics-certs
    readOnly: true

# Add the --metrics-cert-path argument for the metrics server
- op: add
  path: /spec/template/spec/containers/0/args/-
  value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs

# Add the metrics-server certs volume configuration
- op: add
  path: /spec/template/spec/volumes/-
  value:
    name: metrics-certs
    secret:
      secretName: metrics-server-cert
      optional: false
      items:
      - key: ca.crt
        path: ca.crt
      - key: tls.crt
        path: tls.crt
      - key: tls.key
        path: tls.key
234
deploy/rig-operator/config/default/kustomization.yaml
Normal file
@@ -0,0 +1,234 @@
# Adds namespace to all resources.
namespace: deploy-system

# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: deploy-

# Labels to add to all resources and selectors.
#labels:
#- includeSelectors: true
#  pairs:
#    someName: someValue

resources:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
# [METRICS] Expose the controller manager metrics service.
- metrics_service.yaml
# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics.
# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will
# be able to communicate with the Webhook Server.
#- ../network-policy

# Uncomment the patches line if you enable Metrics
patches:
# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
# More info: https://book.kubebuilder.io/reference/metrics
- path: manager_metrics_patch.yaml
  target:
    kind: Deployment

# Uncomment the patches line if you enable Metrics and CertManager
# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line.
# This patch will protect the metrics with certManager self-signed certs.
#- path: cert_metrics_manager_patch.yaml
#  target:
#    kind: Deployment

# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- path: manager_webhook_patch.yaml
#  target:
#    kind: Deployment

# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
# Uncomment the following replacements to add the cert-manager CA injection annotations
#replacements:
#  - source: # Uncomment the following block to enable certificates for metrics
#      kind: Service
#      version: v1
#      name: controller-manager-metrics-service
#      fieldPath: metadata.name
#    targets:
#      - select:
#          kind: Certificate
#          group: cert-manager.io
#          version: v1
#          name: metrics-certs
#        fieldPaths:
#          - spec.dnsNames.0
#          - spec.dnsNames.1
#        options:
#          delimiter: '.'
#          index: 0
#          create: true
#      - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor
#          kind: ServiceMonitor
#          group: monitoring.coreos.com
#          version: v1
#          name: controller-manager-metrics-monitor
#        fieldPaths:
#          - spec.endpoints.0.tlsConfig.serverName
#        options:
#          delimiter: '.'
#          index: 0
#          create: true

#  - source:
#      kind: Service
#      version: v1
#      name: controller-manager-metrics-service
#      fieldPath: metadata.namespace
#    targets:
#      - select:
#          kind: Certificate
#          group: cert-manager.io
#          version: v1
#          name: metrics-certs
#        fieldPaths:
#          - spec.dnsNames.0
#          - spec.dnsNames.1
#        options:
#          delimiter: '.'
#          index: 1
#          create: true
#      - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor
#          kind: ServiceMonitor
#          group: monitoring.coreos.com
#          version: v1
#          name: controller-manager-metrics-monitor
#        fieldPaths:
#          - spec.endpoints.0.tlsConfig.serverName
#        options:
#          delimiter: '.'
#          index: 1
#          create: true

#  - source: # Uncomment the following block if you have any webhook
#      kind: Service
#      version: v1
#      name: webhook-service
#      fieldPath: .metadata.name # Name of the service
#    targets:
#      - select:
#          kind: Certificate
#          group: cert-manager.io
#          version: v1
#          name: serving-cert
#        fieldPaths:
#          - .spec.dnsNames.0
#          - .spec.dnsNames.1
#        options:
#          delimiter: '.'
#          index: 0
#          create: true
#  - source:
#      kind: Service
#      version: v1
#      name: webhook-service
#      fieldPath: .metadata.namespace # Namespace of the service
#    targets:
#      - select:
#          kind: Certificate
#          group: cert-manager.io
#          version: v1
#          name: serving-cert
#        fieldPaths:
#          - .spec.dnsNames.0
#          - .spec.dnsNames.1
#        options:
#          delimiter: '.'
#          index: 1
#          create: true

#  - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation)
#      kind: Certificate
#      group: cert-manager.io
#      version: v1
#      name: serving-cert # This name should match the one in certificate.yaml
#      fieldPath: .metadata.namespace # Namespace of the certificate CR
#    targets:
#      - select:
#          kind: ValidatingWebhookConfiguration
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 0
#          create: true
#  - source:
#      kind: Certificate
#      group: cert-manager.io
#      version: v1
#      name: serving-cert
#      fieldPath: .metadata.name
#    targets:
#      - select:
#          kind: ValidatingWebhookConfiguration
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 1
#          create: true

#  - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting )
#      kind: Certificate
#      group: cert-manager.io
#      version: v1
#      name: serving-cert
#      fieldPath: .metadata.namespace # Namespace of the certificate CR
#    targets:
#      - select:
#          kind: MutatingWebhookConfiguration
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 0
#          create: true
#  - source:
#      kind: Certificate
#      group: cert-manager.io
#      version: v1
#      name: serving-cert
#      fieldPath: .metadata.name
#    targets:
#      - select:
#          kind: MutatingWebhookConfiguration
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 1
#          create: true

#  - source: # Uncomment the following block if you have a ConversionWebhook (--conversion)
#      kind: Certificate
#      group: cert-manager.io
#      version: v1
#      name: serving-cert
#      fieldPath: .metadata.namespace # Namespace of the certificate CR
#    targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD.
# +kubebuilder:scaffold:crdkustomizecainjectionns
#  - source:
#      kind: Certificate
#      group: cert-manager.io
#      version: v1
#      name: serving-cert
#      fieldPath: .metadata.name
#    targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD.
# +kubebuilder:scaffold:crdkustomizecainjectionname
@@ -0,0 +1,4 @@
# This patch adds the args to allow exposing the metrics endpoint using HTTPS
- op: add
  path: /spec/template/spec/containers/0/args/0
  value: --metrics-bind-address=:8443
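
Assuming kustomize applies this JSON patch on top of the manager Deployment defined later in config/manager/manager.yaml, the add at index 0 prepends the flag, so the container's argument list would read roughly:

args:
- --metrics-bind-address=:8443        # inserted at index 0 by the patch above
- --leader-elect
- --health-probe-bind-address=:8081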
18
deploy/rig-operator/config/default/metrics_service.yaml
Normal file
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    control-plane: controller-manager
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager-metrics-service
  namespace: system
spec:
  ports:
    - name: https
      port: 8443
      protocol: TCP
      targetPort: 8443
  selector:
    control-plane: controller-manager
    app.kubernetes.io/name: deploy
2
deploy/rig-operator/config/manager/kustomization.yaml
Normal file
@@ -0,0 +1,2 @@
resources:
- manager.yaml
99
deploy/rig-operator/config/manager/manager.yaml
Normal file
@@ -0,0 +1,99 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    control-plane: controller-manager
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
  labels:
    control-plane: controller-manager
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
spec:
  selector:
    matchLabels:
      control-plane: controller-manager
      app.kubernetes.io/name: deploy
  replicas: 1
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/default-container: manager
      labels:
        control-plane: controller-manager
        app.kubernetes.io/name: deploy
    spec:
      # TODO(user): Uncomment the following code to configure the nodeAffinity expression
      # according to the platforms which are supported by your solution.
      # It is considered best practice to support multiple architectures. You can
      # build your manager image using the makefile target docker-buildx.
      # affinity:
      #   nodeAffinity:
      #     requiredDuringSchedulingIgnoredDuringExecution:
      #       nodeSelectorTerms:
      #         - matchExpressions:
      #           - key: kubernetes.io/arch
      #             operator: In
      #             values:
      #               - amd64
      #               - arm64
      #               - ppc64le
      #               - s390x
      #           - key: kubernetes.io/os
      #             operator: In
      #             values:
      #               - linux
      securityContext:
        # Projects are configured by default to adhere to the "restricted" Pod Security Standards.
        # This ensures that deployments meet the highest security requirements for Kubernetes.
        # For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
        runAsNonRoot: true
        seccompProfile:
          type: RuntimeDefault
      containers:
      - command:
        - /manager
        args:
        - --leader-elect
        - --health-probe-bind-address=:8081
        image: controller:latest
        name: manager
        ports: []
        securityContext:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - "ALL"
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 15
          periodSeconds: 20
        readinessProbe:
          httpGet:
            path: /readyz
            port: 8081
          initialDelaySeconds: 5
          periodSeconds: 10
        # TODO(user): Configure the resources accordingly based on the project requirements.
        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
        resources:
          limits:
            cpu: 500m
            memory: 128Mi
          requests:
            cpu: 10m
            memory: 64Mi
        volumeMounts: []
      volumes: []
      serviceAccountName: controller-manager
      terminationGracePeriodSeconds: 10
@@ -0,0 +1,27 @@
# This NetworkPolicy allows ingress traffic from Pods running in namespaces
# labeled with 'metrics: enabled'. Only Pods in those namespaces are able to
# gather data from the metrics endpoint.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: allow-metrics-traffic
  namespace: system
spec:
  podSelector:
    matchLabels:
      control-plane: controller-manager
      app.kubernetes.io/name: deploy
  policyTypes:
  - Ingress
  ingress:
  # This allows ingress traffic from any namespace with the label metrics: enabled
  - from:
    - namespaceSelector:
        matchLabels:
          metrics: enabled # Only from namespaces with this label
    ports:
    - port: 8443
      protocol: TCP
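
With this policy enabled, a namespace that should scrape the metrics endpoint has to carry the label the selector matches on. A minimal sketch (the namespace name is hypothetical):

apiVersion: v1
kind: Namespace
metadata:
  name: monitoring            # hypothetical namespace running the scraper
  labels:
    metrics: enabled          # required by the NetworkPolicy above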
@@ -0,0 +1,2 @@
resources:
- allow-metrics-traffic.yaml
11
deploy/rig-operator/config/prometheus/kustomization.yaml
Normal file
@@ -0,0 +1,11 @@
resources:
- monitor.yaml

# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus
# to securely reference certificates created and managed by cert-manager.
# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml
# to mount the "metrics-server-cert" secret in the Manager Deployment.
#patches:
#  - path: monitor_tls_patch.yaml
#    target:
#      kind: ServiceMonitor
27
deploy/rig-operator/config/prometheus/monitor.yaml
Normal file
@@ -0,0 +1,27 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    control-plane: controller-manager
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager-metrics-monitor
  namespace: system
spec:
  endpoints:
    - path: /metrics
      port: https # Ensure this is the name of the port that exposes HTTPS metrics
      scheme: https
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      tlsConfig:
        # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables
        # certificate verification, exposing the system to potential man-in-the-middle attacks.
        # For production environments, it is recommended to use cert-manager for automatic TLS certificate management.
        # To apply this configuration, enable cert-manager and use the patch located at config/prometheus/servicemonitor_tls_patch.yaml,
        # which securely references the certificate from the 'metrics-server-cert' secret.
        insecureSkipVerify: true
  selector:
    matchLabels:
      control-plane: controller-manager
      app.kubernetes.io/name: deploy
19
deploy/rig-operator/config/prometheus/monitor_tls_patch.yaml
Normal file
@@ -0,0 +1,19 @@
# Patch for Prometheus ServiceMonitor to enable secure TLS configuration
# using certificates managed by cert-manager
- op: replace
  path: /spec/endpoints/0/tlsConfig
  value:
    # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize
    serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc
    insecureSkipVerify: false
    ca:
      secret:
        name: metrics-server-cert
        key: ca.crt
    cert:
      secret:
        name: metrics-server-cert
        key: tls.crt
    keySecret:
      name: metrics-server-cert
      key: tls.key
@@ -0,0 +1,27 @@
# This rule is not used by the project deploy itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants full permissions ('*') over rig.appstack.io.
# This role is intended for users authorized to modify roles and bindings within the cluster,
# enabling them to delegate specific permissions to other users or groups as needed.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: clusterblueprint-admin-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints
  verbs:
  - '*'
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints/status
  verbs:
  - get
@@ -0,0 +1,33 @@
# This rule is not used by the project deploy itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants permissions to create, update, and delete resources within the rig.appstack.io API group.
# This role is intended for users who need to manage these resources
# but should not control RBAC or manage permissions for others.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: clusterblueprint-editor-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints/status
  verbs:
  - get
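
To delegate the editor role above, a cluster admin would bind it to a user or group. A minimal sketch (binding and group names are hypothetical; note that the namePrefix from config/default/kustomization.yaml likely renames the role to deploy-clusterblueprint-editor-role when deployed):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: clusterblueprint-editors          # hypothetical binding name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: clusterblueprint-editor-role      # the role defined above (prefix may apply)
subjects:
- kind: Group
  name: platform-team                     # hypothetical group
  apiGroup: rbac.authorization.k8s.io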
@@ -0,0 +1,29 @@
# This rule is not used by the project deploy itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants read-only access to rig.appstack.io resources.
# This role is intended for users who need visibility into these resources
# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: clusterblueprint-viewer-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints/status
  verbs:
  - get
@@ -0,0 +1,27 @@
# This rule is not used by the project deploy itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants full permissions ('*') over rig.appstack.io.
# This role is intended for users authorized to modify roles and bindings within the cluster,
# enabling them to delegate specific permissions to other users or groups as needed.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: harvesterblueprint-admin-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - harvesterblueprints
  verbs:
  - '*'
- apiGroups:
  - rig.appstack.io
  resources:
  - harvesterblueprints/status
  verbs:
  - get
@@ -0,0 +1,33 @@
|
||||
# This rule is not used by the project deploy itself.
|
||||
# It is provided to allow the cluster admin to help manage permissions for users.
|
||||
#
|
||||
# Grants permissions to create, update, and delete resources within the rig.appstack.io.
|
||||
# This role is intended for users who need to manage these resources
|
||||
# but should not control RBAC or manage permissions for others.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: deploy
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: harvesterblueprint-editor-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- rig.appstack.io
|
||||
resources:
|
||||
- harvesterblueprints
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rig.appstack.io
|
||||
resources:
|
||||
- harvesterblueprints/status
|
||||
verbs:
|
||||
- get
|
||||
@@ -0,0 +1,29 @@
|
||||
# This rule is not used by the project deploy itself.
|
||||
# It is provided to allow the cluster admin to help manage permissions for users.
|
||||
#
|
||||
# Grants read-only access to rig.appstack.io resources.
|
||||
# This role is intended for users who need visibility into these resources
|
||||
# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: deploy
|
||||
app.kubernetes.io/managed-by: kustomize
|
||||
name: harvesterblueprint-viewer-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- rig.appstack.io
|
||||
resources:
|
||||
- harvesterblueprints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rig.appstack.io
|
||||
resources:
|
||||
- harvesterblueprints/status
|
||||
verbs:
|
||||
- get
|
||||
@@ -0,0 +1,27 @@
# This rule is not used by the deploy project itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants full permissions ('*') over rig.appstack.io resources.
# This role is intended for users authorized to modify roles and bindings within the cluster,
# enabling them to delegate specific permissions to other users or groups as needed.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: infrablueprint-admin-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - infrablueprints
  verbs:
  - '*'
- apiGroups:
  - rig.appstack.io
  resources:
  - infrablueprints/status
  verbs:
  - get
@@ -0,0 +1,33 @@
# This rule is not used by the deploy project itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants permissions to create, update, and delete resources within the rig.appstack.io API group.
# This role is intended for users who need to manage these resources
# but should not control RBAC or manage permissions for others.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: infrablueprint-editor-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - infrablueprints
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - rig.appstack.io
  resources:
  - infrablueprints/status
  verbs:
  - get
@@ -0,0 +1,29 @@
# This rule is not used by the deploy project itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants read-only access to rig.appstack.io resources.
# This role is intended for users who need visibility into these resources
# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: infrablueprint-viewer-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - infrablueprints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - rig.appstack.io
  resources:
  - infrablueprints/status
  verbs:
  - get
37
deploy/rig-operator/config/rbac/kustomization.yaml
Normal file
@@ -0,0 +1,37 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# The following RBAC configurations are used to protect
# the metrics endpoint with authn/authz. These configurations
# ensure that only authorized users and service accounts
# can access the metrics endpoint. Comment out the following
# permissions if you want to disable this protection.
# More info: https://book.kubebuilder.io/reference/metrics.html
- metrics_auth_role.yaml
- metrics_auth_role_binding.yaml
- metrics_reader_role.yaml
# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by
# default, aiding admins in cluster management. Those roles are
# not used by the deploy project itself. You can comment out the following
# lines if you do not want those helpers to be installed with your project.
- vsphereblueprint_admin_role.yaml
- vsphereblueprint_editor_role.yaml
- vsphereblueprint_viewer_role.yaml
- harvesterblueprint_admin_role.yaml
- harvesterblueprint_editor_role.yaml
- harvesterblueprint_viewer_role.yaml
- infrablueprint_admin_role.yaml
- infrablueprint_editor_role.yaml
- infrablueprint_viewer_role.yaml
- clusterblueprint_admin_role.yaml
- clusterblueprint_editor_role.yaml
- clusterblueprint_viewer_role.yaml
40
deploy/rig-operator/config/rbac/leader_election_role.yaml
Normal file
@@ -0,0 +1,40 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: leader-election-role
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: leader-election-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
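The Role above exists because controller-runtime implements leader election by holding a coordination.k8s.io Lease (ConfigMap-based locks are the legacy mechanism, and Events record transitions). For reference, the Lease the elected manager maintains looks roughly like this; the name and holder identity are hypothetical:

apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
  name: rig-operator-leader-election  # hypothetical lock name
  namespace: system                   # the deployment namespace
spec:
  holderIdentity: controller-manager-6f7d9c-abcde_1f2e3d  # hypothetical pod identity
  leaseDurationSeconds: 15
  leaseTransitions: 1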
17
deploy/rig-operator/config/rbac/metrics_auth_role.yaml
Normal file
@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-auth-role
rules:
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-auth-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metrics-auth-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
9
deploy/rig-operator/config/rbac/metrics_reader_role.yaml
Normal file
@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-reader
rules:
- nonResourceURLs:
  - "/metrics"
  verbs:
  - get
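With this protection enabled, a scraper must present a token whose subject is bound to metrics-reader; the manager then validates requests using the TokenReview and SubjectAccessReview permissions above. A minimal sketch of such a binding, assuming a hypothetical prometheus ServiceAccount in a monitoring namespace:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-reader-binding  # hypothetical binding name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metrics-reader
subjects:
- kind: ServiceAccount
  name: prometheus      # hypothetical scraper ServiceAccount
  namespace: monitoring # hypothetical namespace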
54
deploy/rig-operator/config/rbac/role.yaml
Normal file
@@ -0,0 +1,54 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: manager-role
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints
  - infrablueprints
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints/finalizers
  verbs:
  - update
- apiGroups:
  - rig.appstack.io
  resources:
  - clusterblueprints/status
  - infrablueprints/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - rig.appstack.io
  resources:
  - harvesterblueprints
  verbs:
  - get
  - list
  - watch
15
deploy/rig-operator/config/rbac/role_binding.yaml
Normal file
@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: manager-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: manager-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
8
deploy/rig-operator/config/rbac/service_account.yaml
Normal file
@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager
  namespace: system
@@ -0,0 +1,27 @@
# This rule is not used by the deploy project itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants full permissions ('*') over rig.appstack.io resources.
# This role is intended for users authorized to modify roles and bindings within the cluster,
# enabling them to delegate specific permissions to other users or groups as needed.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: vsphereblueprint-admin-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - vsphereblueprints
  verbs:
  - '*'
- apiGroups:
  - rig.appstack.io
  resources:
  - vsphereblueprints/status
  verbs:
  - get
@@ -0,0 +1,33 @@
# This rule is not used by the deploy project itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants permissions to create, update, and delete resources within the rig.appstack.io API group.
# This role is intended for users who need to manage these resources
# but should not control RBAC or manage permissions for others.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: vsphereblueprint-editor-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - vsphereblueprints
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - rig.appstack.io
  resources:
  - vsphereblueprints/status
  verbs:
  - get
@@ -0,0 +1,29 @@
# This rule is not used by the deploy project itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants read-only access to rig.appstack.io resources.
# This role is intended for users who need visibility into these resources
# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: vsphereblueprint-viewer-role
rules:
- apiGroups:
  - rig.appstack.io
  resources:
  - vsphereblueprints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - rig.appstack.io
  resources:
  - vsphereblueprints/status
  verbs:
  - get
7
deploy/rig-operator/config/samples/kustomization.yaml
Normal file
@@ -0,0 +1,7 @@
## Append samples of your project ##
resources:
- rig_v1alpha1_clusterblueprint.yaml
- rig_v1alpha1_infrablueprint.yaml
- rig_v1alpha1_harvesterblueprint.yaml
- rig_v1alpha1_vsphereblueprint.yaml
# +kubebuilder:scaffold:manifestskustomizesamples
@@ -0,0 +1,22 @@
apiVersion: rig.appstack.io/v1alpha1
kind: ClusterBlueprint
metadata:
  name: test-cluster-01
  namespace: fleet-default
spec:
  # Points to the InfraBlueprint (which links to Harvester + Quotas)
  infraBlueprintRef: "dev-environment-v1"

  # 1. Lifecycle
  kubernetesVersion: "v1.33.5+rke2r1"

  # 2. Topology: Control Plane (1 Node)
  controlPlaneHA: false

  # 3. Topology: Workers
  workerPools:
  - name: "app-workers"
    quantity: 1
    cpuCores: 4
    memoryGb: 16
    diskGb: 60
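A production-style request would presumably differ only in the HA flag and pool sizing; a hypothetical variant of the sample above:

apiVersion: rig.appstack.io/v1alpha1
kind: ClusterBlueprint
metadata:
  name: prod-cluster-01  # hypothetical name
  namespace: fleet-default
spec:
  infraBlueprintRef: "dev-environment-v1"
  kubernetesVersion: "v1.33.5+rke2r1"
  controlPlaneHA: true   # assumed to request a multi-node control plane
  workerPools:
  - name: "app-workers"
    quantity: 3
    cpuCores: 4
    memoryGb: 16
    diskGb: 60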
@@ -0,0 +1,14 @@
apiVersion: rig.appstack.io/v1alpha1
kind: HarvesterBlueprint
metadata:
  name: dev-harvester-config
  namespace: fleet-default
spec:
  # [MOVED] Technical connection details live here now
  harvesterUrl: "https://172.27.27.190:6443"

  # [MOVED] VM Template details
  vmNamespace: "vanderlande"
  imageName: "vanderlande/image-qhtpc"
  networkName: "vanderlande/vm-lan"
  sshUser: "rancher"
@@ -0,0 +1,17 @@
apiVersion: rig.appstack.io/v1alpha1
kind: InfraBlueprint
metadata:
  name: dev-environment-v1
  namespace: fleet-default
spec:
  cloudCredentialSecret: "cc-mrklm"
  # [NEW] Added Rancher URL
  rancherUrl: "https://rancher-mgmt.product.lan"

  providerRef:
    kind: HarvesterBlueprint
    name: dev-harvester-config
  quota:
    maxCpu: 100
    maxMemoryGb: 256
    maxDiskGb: 3000
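The quota block above is the budget the controller enforces; per the uml.mermaid diagram later in this commit, consumption is tracked on the InfraBlueprint's status (status.quotaUsed). A hypothetical status after the test-cluster-01 request above is admitted, using the default control-plane sizing of 2 CPU / 8 GB noted in the vSphere sample; the field shape is an assumption:

status:
  # hypothetical accounting; the quotaUsed field name comes from docs/uml.mermaid
  quotaUsed:
    cpu: 6        # 4 worker vCPUs + 2 default control-plane vCPUs
    memoryGb: 24  # 16 GB worker + 8 GB default control plane
    diskGb: 60    # worker disk; the control-plane disk default is not shown in the samples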
@@ -0,0 +1,9 @@
apiVersion: rig.appstack.io/v1alpha1
kind: VsphereBlueprint
metadata:
  labels:
    app.kubernetes.io/name: deploy
    app.kubernetes.io/managed-by: kustomize
  name: vsphereblueprint-sample
spec:
  # TODO(user): Add fields here
70
deploy/rig-operator/config/samples/vsphere_stack.yaml
Normal file
@@ -0,0 +1,70 @@
# ---------------------------------------------------------
# 1. Technical Configuration (The Location)
# ---------------------------------------------------------
apiVersion: rig.appstack.io/v1alpha1
kind: VsphereBlueprint
metadata:
  name: dev-vsphere-config
  namespace: fleet-default
spec:
  vCenter: "vcenter.vanderlande.com"
  datacenter: "NL001"
  folder: "ICT Digitalisation - Rancher"
  resourcePool: "NL001 Development - Rancher/Resources"
  datastore: "NL001 Development - Rancher SDRS"
  network: "nl001.vDS.Distri.Vlan.1542"
  template: "nl001-cp-ubuntu-22.04-amd64-20250327-5.15.0-135-rke2-k3s"

---
# ---------------------------------------------------------
# 2. Infra Manager (The Accountant & Identity)
# ---------------------------------------------------------
apiVersion: rig.appstack.io/v1alpha1
kind: InfraBlueprint
metadata:
  name: dev-vsphere-infra
  namespace: fleet-default
spec:
  # Credentials (Must exist in Rancher/Kubernetes)
  cloudCredentialSecret: "cc-lhtl9"
  rancherUrl: "https://rancher.tst.vanderlande.com"

  # Point to the vSphere Configuration above
  providerRef:
    kind: VsphereBlueprint
    name: dev-vsphere-config

  # Budget Limits for this Environment
  quota:
    maxCpu: 50       # Total vCPUs allowed
    maxMemoryGb: 128 # Total RAM allowed
    maxDiskGb: 5000  # Total Disk allowed

---
# ---------------------------------------------------------
# 3. Cluster Request (The User Goal)
# ---------------------------------------------------------
apiVersion: rig.appstack.io/v1alpha1
kind: ClusterBlueprint
metadata:
  name: test-vsphere-cluster-01
  namespace: fleet-default
spec:
  # Link to the vSphere Infra defined above
  infraBlueprintRef: "dev-vsphere-infra"

  # Lifecycle
  kubernetesVersion: "v1.31.12+rke2r1"

  # Topology: Control Plane (1 Node)
  # Uses default sizing from values.yaml (2 CPU / 8 GB)
  controlPlaneHA: false

  # Topology: Workers
  # These sizes (GB) will be converted to MB automatically by your Strategy
  workerPools:
  - name: "app-workers"
    quantity: 2
    cpuCores: 4
    memoryGb: 8  # Strategy converts to 8192 MB
    diskGb: 100  # Strategy converts to 102400 MB
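The GB-to-MB comments above imply the VsphereStrategy normalizes units before handing values to Helm. A hypothetical fragment of the rendered node-pool values for the app-workers pool; the key names are illustrative, and only the unit conversion itself is stated in the sample:

# hypothetical Helm values fragment produced for the app-workers pool
nodePools:
- name: app-workers
  quantity: 2
  cpuCount: 4
  memorySize: 8192   # 8 GB -> 8192 MB
  diskSize: 102400   # 100 GB -> 102400 MB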
67
deploy/rig-operator/docs/blueprint_orchestration.svg
Normal file
File diff suppressed because one or more lines are too long (SVG image, 548 KiB)
29
deploy/rig-operator/docs/controllerflow.mermaid
Normal file
@@ -0,0 +1,29 @@
sequenceDiagram
    participant User
    participant Controller
    participant InfraBP as InfraBlueprint
    participant ProviderBP as Harvester/Vsphere BP
    participant Strategy
    participant Builder as MasterBuilder
    participant Helm

    User->>Controller: Create ClusterBlueprint
    Controller->>InfraBP: 1. Get Infra & Quota
    InfraBP-->>Controller: ProviderRef (Kind="HarvesterBlueprint")

    note over Controller: Dynamic Switching Logic

    alt Kind is Harvester
        Controller->>ProviderBP: 2. Get Harvester Config
        Controller->>Strategy: 3. Init HarvesterStrategy
    else Kind is Vsphere
        Controller->>ProviderBP: 2. Get Vsphere Config
        Controller->>Strategy: 3. Init VsphereStrategy
    end

    Controller->>Builder: 4. Build(Strategy)
    Builder->>Strategy: GenerateNodePools()
    Strategy-->>Builder: [Pool A, Pool B]
    Builder-->>Controller: map[values]

    Controller->>Helm: 5. Apply(values)
67
deploy/rig-operator/docs/flow-diagram.svg
Normal file
File diff suppressed because one or more lines are too long (SVG image, 504 KiB)
121
deploy/rig-operator/docs/uml.mermaid
Normal file
@@ -0,0 +1,121 @@
classDiagram
    direction TB

    %% ==========================================
    %% PACKAGE: K8s API Definitions (Blueprints)
    %% ==========================================
    namespace API_Blueprints {
        class ClusterBlueprint {
            <<Kind: ClusterBlueprint, Short: cbp>>
            Description: Generic cluster request
            ---
            +spec.infraBlueprintRef : string
            +spec.kubernetesVersion : string
            +spec.workerPools : List~GenericPoolReq~
        }

        class InfraBlueprint {
            <<Kind: InfraBlueprint, Short: ibp>>
            Description: Manages quotas and provider ref
            ---
            +spec.quotaLimits : ResourceList
            +status.quotaUsed : ResourceList
            +spec.providerRef : TypedLocalObjectReference
        }

        class VsphereBlueprint {
            <<Kind: VsphereBlueprint, Short: vbp>>
            Description: Concrete vSphere details
            ---
            +spec.vcenterURL : string
            +spec.datacenterID : string
            +spec.networkIDs : List~string~
        }

        class HarvesterBlueprint {
            <<Kind: HarvesterBlueprint, Short: hbp>>
            Description: Concrete Harvester details
            ---
            +spec.harvesterURL : string
            +spec.vmNamespace : string
            +spec.imageName : string
        }

        class AzureBlueprint {
            <<Kind: AzureBlueprint, Short: abp>>
            Description: Future Azure details
        }
    }

    %% Relationships between Blueprints
    ClusterBlueprint --> InfraBlueprint : 1. References by Name
    note for InfraBlueprint "The providerRef is polymorphic.\nIt points to ANY ProviderBlueprint Kind\n(vbp, hbp, or abp)."
    InfraBlueprint ..> VsphereBlueprint : 2. Dynamically references Kind=vbp
    InfraBlueprint ..> HarvesterBlueprint : 2. Dynamically references Kind=hbp
    InfraBlueprint ..> AzureBlueprint : 2. Dynamically references Kind=abp


    %% ==========================================
    %% PACKAGE: Controller (Orchestration)
    %% ==========================================
    namespace Controller_Layer {
        class RIGController {
            Description: The brain. Watches CBPs, checks IBP quotas.
            ---
            +Reconcile(request)
        }
    }

    RIGController "watches" --> ClusterBlueprint
    RIGController "reads & checks quota" --> InfraBlueprint


    %% ==========================================
    %% PACKAGE: Builders & Strategies (Generation)
    %% ==========================================
    namespace Generation_Layer {
        class MasterValuesBuilder {
            Description: Knows generic Helm structure.
            ---
            -strategy : ProviderStrategy
            +BuildHelmValues(cbp, ibp) Map
        }

        class ProviderStrategy {
            <<Interface>>
            Description: Contract for isolated provider logic.
            ---
            +GenerateNodePools(genericPools, providerBP) List~Any~
            +GetGlobalOverrides(providerBP) Map
            +PerformPreFlight(ctx, providerBP) Error
        }

        class VsphereStrategy {
            Description: Specialist VBP to Helm translation.
            ---
            +GenerateNodePools(...)
        }

        class HarvesterStrategy {
            Description: Specialist HBP to Helm translation.
            ---
            +GenerateNodePools(...)
            +PerformPreFlight(...)
        }
    }

    %% Controller orchestrates builders
    note for RIGController "1. Reads IBP.providerRef.Kind\n2. Instantiates correct Strategy (e.g. VsphereStrategy)\n3. Injects Strategy into MasterBuilder\n4. Calls MasterBuilder.Build()"

    RIGController "configures & calls" --> MasterValuesBuilder

    %% Master Builder uses the interface
    MasterValuesBuilder o--> ProviderStrategy : Injected Dependency

    %% Realization of strategies
    ProviderStrategy <|.. VsphereStrategy : Implements
    ProviderStrategy <|.. HarvesterStrategy : Implements

    %% Strategies read their specific blueprints
    VsphereStrategy ..> VsphereBlueprint : Reads config to map data
    HarvesterStrategy ..> HarvesterBlueprint : Reads config to map data
161
deploy/rig-operator/go.mod
Normal file
@@ -0,0 +1,161 @@
module vanderlande.com/ittp/appstack/rig-operator

go 1.25.0

require (
	github.com/onsi/ginkgo/v2 v2.27.2
	github.com/onsi/gomega v1.38.2
	gopkg.in/yaml.v3 v3.0.1
	helm.sh/helm/v3 v3.19.4
	k8s.io/api v0.35.0
	k8s.io/apimachinery v0.35.0
	k8s.io/cli-runtime v0.35.0
	k8s.io/client-go v0.35.0
	sigs.k8s.io/controller-runtime v0.22.4
)

require (
	cel.dev/expr v0.24.0 // indirect
	dario.cat/mergo v1.0.1 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
	github.com/BurntSushi/toml v1.5.0 // indirect
	github.com/MakeNowJust/heredoc v1.0.0 // indirect
	github.com/Masterminds/goutils v1.1.1 // indirect
	github.com/Masterminds/semver/v3 v3.4.0 // indirect
	github.com/Masterminds/sprig/v3 v3.3.0 // indirect
	github.com/Masterminds/squirrel v1.5.4 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/chai2010/gettext-go v1.0.2 // indirect
	github.com/containerd/containerd v1.7.29 // indirect
	github.com/containerd/errdefs v0.3.0 // indirect
	github.com/containerd/log v0.1.0 // indirect
	github.com/containerd/platforms v0.2.1 // indirect
	github.com/cyphar/filepath-securejoin v0.6.1 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
	github.com/evanphx/json-patch v5.9.11+incompatible // indirect
	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
	github.com/fatih/color v1.13.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.9.0 // indirect
	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
	github.com/go-errors/errors v1.4.2 // indirect
	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-logr/zapr v1.3.0 // indirect
	github.com/go-openapi/jsonpointer v0.21.0 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.23.0 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/gobwas/glob v0.2.3 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/btree v1.1.3 // indirect
	github.com/google/cel-go v0.26.0 // indirect
	github.com/google/gnostic-models v0.7.0 // indirect
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
	github.com/gosuri/uitable v0.0.4 // indirect
	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/huandu/xstrings v1.5.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jmoiron/sqlx v1.4.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
	github.com/lib/pq v1.10.9 // indirect
	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.17 // indirect
	github.com/mattn/go-runewidth v0.0.9 // indirect
	github.com/mitchellh/copystructure v1.2.0 // indirect
	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
	github.com/mitchellh/reflectwalk v1.0.2 // indirect
	github.com/moby/spdystream v0.5.0 // indirect
	github.com/moby/term v0.5.2 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.1 // indirect
	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/prometheus/client_golang v1.22.0 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.62.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/rubenv/sql-migrate v1.8.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
	github.com/shopspring/decimal v1.4.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/spf13/cast v1.7.0 // indirect
	github.com/spf13/cobra v1.10.1 // indirect
	github.com/spf13/pflag v1.0.10 // indirect
	github.com/stoewer/go-strcase v1.3.0 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xlab/treeprint v1.2.0 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
	go.opentelemetry.io/otel v1.35.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
	go.opentelemetry.io/otel/metric v1.35.0 // indirect
	go.opentelemetry.io/otel/sdk v1.34.0 // indirect
	go.opentelemetry.io/otel/trace v1.35.0 // indirect
	go.opentelemetry.io/proto/otlp v1.5.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	go.yaml.in/yaml/v2 v2.4.3 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/crypto v0.45.0 // indirect
	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
	golang.org/x/mod v0.29.0 // indirect
	golang.org/x/net v0.47.0 // indirect
	golang.org/x/oauth2 v0.30.0 // indirect
	golang.org/x/sync v0.18.0 // indirect
	golang.org/x/sys v0.38.0 // indirect
	golang.org/x/term v0.37.0 // indirect
	golang.org/x/text v0.31.0 // indirect
	golang.org/x/time v0.12.0 // indirect
	golang.org/x/tools v0.38.0 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
	google.golang.org/grpc v1.72.1 // indirect
	google.golang.org/protobuf v1.36.8 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	k8s.io/apiextensions-apiserver v0.34.2 // indirect
	k8s.io/apiserver v0.34.2 // indirect
	k8s.io/component-base v0.34.2 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
	k8s.io/kubectl v0.34.2 // indirect
	k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
	oras.land/oras-go/v2 v2.6.0 // indirect
	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
	sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
	sigs.k8s.io/kustomize/api v0.20.1 // indirect
	sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
	sigs.k8s.io/randfill v1.0.0 // indirect
	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
	sigs.k8s.io/yaml v1.6.0 // indirect
)
501
deploy/rig-operator/go.sum
Normal file
501
deploy/rig-operator/go.sum
Normal file
@@ -0,0 +1,501 @@
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
||||
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
|
||||
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
|
||||
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
|
||||
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
|
||||
github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
|
||||
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
|
||||
github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE=
|
||||
github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs=
|
||||
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
|
||||
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE=
|
||||
github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
|
||||
github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
|
||||
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
|
||||
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
|
||||
github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
|
||||
github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
|
||||
github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
|
||||
github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
|
||||
github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
|
||||
github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
|
||||
github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
|
||||
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
|
||||
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
|
||||
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
|
||||
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
|
||||
github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
|
||||
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
|
||||
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
|
||||
github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho=
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U=
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc=
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o=
github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
helm.sh/helm/v3 v3.19.4 h1:E2yFBejmZBczWr5LblhjZbvAOAwVumfBO1AtN3nqI30=
helm.sh/helm/v3 v3.19.4/go.mod h1:PC1rk7PqacpkV4acUFMLStOOis7QM9Jq3DveHBInu4s=
k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo=
k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE=
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE=
k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI=
k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE=
k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY=
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ=
k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/kubectl v0.34.2 h1:+fWGrVlDONMUmmQLDaGkQ9i91oszjjRAa94cr37hzqA=
k8s.io/kubectl v0.34.2/go.mod h1:X2KTOdtZZNrTWmUD4oHApJ836pevSl+zvC5sI6oO2YQ=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
15
deploy/rig-operator/hack/boilerplate.go.txt
Normal file
@@ -0,0 +1,15 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
134
deploy/rig-operator/internal/builder/master.go
Normal file
@@ -0,0 +1,134 @@
package builder

import (
    "context"
    "encoding/json"
    "fmt"

    "gopkg.in/yaml.v3"

    "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
    "vanderlande.com/ittp/appstack/rig-operator/internal/provider"
)

// ChartConfig holds the helm settings extracted from the YAML _defaults.
// The Controller needs this to know WHICH chart to fetch.
type ChartConfig struct {
    Repo    string
    Name    string
    Version string
}
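
// For orientation, the base template is expected to carry a `_defaults` block
// shaped roughly like this (illustrative values only; nothing in this package
// enforces the shape, and missing keys simply fall back to the safe defaults
// set in NewMasterBuilder):
//
//    _defaults:
//      helmChart:
//        repo: https://example.com/charts
//        name: rancher-cluster-templates
//        version: 1.2.3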

type MasterBuilder struct {
    strategy     provider.Strategy
    baseTemplate []byte
    chartConfig  ChartConfig
}

func NewMasterBuilder(strategy provider.Strategy, baseTemplate []byte) *MasterBuilder {
    b := &MasterBuilder{
        strategy:     strategy,
        baseTemplate: baseTemplate,
        // Safe defaults
        chartConfig: ChartConfig{
            Name: "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates",
        },
    }
    return b
}

// GetChartConfig returns the chart details found in the template.
func (b *MasterBuilder) GetChartConfig() ChartConfig {
    return b.chartConfig
}

// Build orchestrates the values generation process
func (b *MasterBuilder) Build(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecret string) (map[string]interface{}, error) {
    values := make(map[string]interface{})
    if err := yaml.Unmarshal(b.baseTemplate, &values); err != nil {
        return nil, fmt.Errorf("failed to unmarshal base template: %w", err)
    }

    // 1. Extract Chart Config from _defaults (Legacy Logic Ported)
    // We do this so the Controller knows what version to install.
    if defaults, ok := values["_defaults"].(map[string]interface{}); ok {
        if chartCfg, ok := defaults["helmChart"].(map[string]interface{}); ok {
            if v, ok := chartCfg["repo"].(string); ok {
                b.chartConfig.Repo = v
            }
            if v, ok := chartCfg["name"].(string); ok {
                b.chartConfig.Name = v
            }
            if v, ok := chartCfg["version"].(string); ok {
                b.chartConfig.Version = v
            }
        }
    }

    // 2. Generate Node Pools (Delegated to Strategy)
    // [DIFFERENCE]: We don't loop here. The Strategy knows how to map CBP -> Provider NodePools.
    nodePools, err := b.strategy.GenerateNodePools(ctx, cbp)
    if err != nil {
        return nil, fmt.Errorf("strategy failed to generate node pools: %w", err)
    }

    // 3. Get Global Overrides (Delegated to Strategy)
    // [DIFFERENCE]: We don't hardcode "cloud_provider_name" here. The Strategy returns it.
    overrides, err := b.strategy.GetGlobalOverrides(ctx, cbp, credentialSecret)
    if err != nil {
        return nil, fmt.Errorf("strategy failed to get global overrides: %w", err)
    }

    // 4. Inject Logic into the Helm Structure
    if clusterMap, ok := values["cluster"].(map[string]interface{}); ok {
        clusterMap["name"] = cbp.Name

        if configMap, ok := clusterMap["config"].(map[string]interface{}); ok {
            configMap["kubernetesVersion"] = cbp.Spec.KubernetesVersion

            // Ensure globalConfig exists
            if _, ok := configMap["globalConfig"]; !ok {
                configMap["globalConfig"] = make(map[string]interface{})
            }
            globalConfig := configMap["globalConfig"].(map[string]interface{})

            // Inject Overrides
            for k, v := range overrides {
                // A. Handle specific Global Config keys
                if k == "cloud_provider_name" || k == "cloud_provider_config" {
                    globalConfig[k] = v
                    continue
                }

                // B. Handle Chart Values (CCM/CSI Addons)
                if k == "chartValues" {
                    if existingChartVals, ok := configMap["chartValues"].(map[string]interface{}); ok {
                        if newChartVals, ok := v.(map[string]interface{}); ok {
                            for ck, cv := range newChartVals {
                                existingChartVals[ck] = cv
                            }
                        }
                    } else {
                        configMap["chartValues"] = v
                    }
                    continue
                }

                // C. Default: Inject at Root level
                values[k] = v
            }
        }
    }

    // 5. Inject Node Pools
    // We marshal/unmarshal to ensure JSON tags from the Strategy structs are respected
    tempJSON, _ := json.Marshal(nodePools)
    var cleanNodePools interface{}
    _ = json.Unmarshal(tempJSON, &cleanNodePools)
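    // (Errors from this round-trip are discarded: nodePools comes from our own
    // strategy structs, so a failure here would point to a programming error
    // rather than bad user input.)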
    values["nodepools"] = cleanNodePools

    // 6. Cleanup internal keys
    delete(values, "_defaults")

    return values, nil
}
@@ -0,0 +1,291 @@
package controller

import (
    "context"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/record"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/log"

    rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
    "vanderlande.com/ittp/appstack/rig-operator/internal/builder"
    "vanderlande.com/ittp/appstack/rig-operator/internal/helm"
    "vanderlande.com/ittp/appstack/rig-operator/internal/provider"
    "vanderlande.com/ittp/appstack/rig-operator/internal/provider/harvester"
    harvesterTemplate "vanderlande.com/ittp/appstack/rig-operator/internal/templates/harvester"

    "vanderlande.com/ittp/appstack/rig-operator/internal/provider/vsphere"
    vsphereTemplate "vanderlande.com/ittp/appstack/rig-operator/internal/templates/vsphere"
)

const (
    rigFinalizer = "rig.appstack.io/finalizer"
)
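
// Deletion is gated on this finalizer: handleDelete (below) first uninstalls
// the Helm release and cleans up any minted Harvester identity, and only then
// removes the finalizer so the ClusterBlueprint can actually disappear.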

// ClusterBlueprintReconciler reconciles a ClusterBlueprint object
type ClusterBlueprintReconciler struct {
    client.Client
    Scheme   *runtime.Scheme
    Recorder record.EventRecorder
}

// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints/finalizers,verbs=update
// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints,verbs=get;list;watch
// +kubebuilder:rbac:groups=rig.appstack.io,resources=harvesterblueprints,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete

func (r *ClusterBlueprintReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    l := log.FromContext(ctx)

    // 1. Fetch ClusterBlueprint (CBP)
    cbp := &rigv1.ClusterBlueprint{}
    if err := r.Get(ctx, req.NamespacedName, cbp); err != nil {
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    // 2. Handle Deletion ... (Same as before)
    if !cbp.ObjectMeta.DeletionTimestamp.IsZero() {
        return r.handleDelete(ctx, cbp)
    }

    // 3. Ensure Finalizer ... (Same as before)
    if !controllerutil.ContainsFinalizer(cbp, rigFinalizer) {
        controllerutil.AddFinalizer(cbp, rigFinalizer)
        if err := r.Update(ctx, cbp); err != nil {
            return ctrl.Result{}, err
        }
    }

    // 4. Fetch InfraBlueprint (IBP)
    ibp := &rigv1.InfraBlueprint{}
    if err := r.Get(ctx, types.NamespacedName{Name: cbp.Spec.InfraBlueprintRef, Namespace: cbp.Namespace}, ibp); err != nil {
        l.Error(err, "InfraBlueprint not found", "Infra", cbp.Spec.InfraBlueprintRef)
        r.updateStatus(ctx, cbp, "PendingInfra", false)
        return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
    }

    // =====================================================================
    // 4.5. QUOTA CHECK (The Gatekeeper)
    // Only check quota if we are NOT already deployed.
    // (Existing clusters keep running even if quota shrinks later)
    // =====================================================================
    if cbp.Status.Phase != "Deployed" {
        if err := r.checkQuota(cbp, ibp); err != nil {
            l.Error(err, "Quota Exceeded")
            // We stop here! Helm Apply will NOT run.
            r.updateStatus(ctx, cbp, "QuotaExceeded", false)
            // Requeue slowly to check if resources freed up later
            return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
        }
    }

    // 5. Select Strategy based on Infra ProviderRef
    var selectedStrategy provider.Strategy
    var baseTemplate []byte
    var credentialSecret string

    switch ibp.Spec.ProviderRef.Kind {
    case "HarvesterBlueprint":
        // A. Fetch the specific Harvester Config (HBP)
        hbp := &rigv1.HarvesterBlueprint{}
        hbpName := types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace}
        if err := r.Get(ctx, hbpName, hbp); err != nil {
            return ctrl.Result{}, fmt.Errorf("failed to load HarvesterBlueprint: %w", err)
        }

        // B. Ensure Identity (Mint ServiceAccount/Secret)
        idMgr := harvester.NewIdentityManager(r.Client, r.Scheme)
        secretName, err := idMgr.Ensure(ctx, cbp, ibp, hbp)
        if err != nil {
            l.Error(err, "Failed to ensure identity")
            r.updateStatus(ctx, cbp, "ProvisioningFailed", false)
            return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
        }
        credentialSecret = secretName

        // C. Load Defaults & Init Strategy
        defaults, err := harvesterTemplate.GetDefaults()
        if err != nil {
            return ctrl.Result{}, err
        }
        baseTemplate = harvesterTemplate.GetBaseValues()
        // [UPDATED] Pass ibp.Spec.RancherURL to the factory
        selectedStrategy = harvester.NewStrategy(
            hbp,
            ibp.Spec.UserData,
            ibp.Spec.RancherURL, // <--- Passing the URL here
            defaults,
        )

    case "VsphereBlueprint":
        // A. Fetch the specific vSphere Config (VBP)
        vbp := &rigv1.VsphereBlueprint{}
        vbpName := types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace}
        if err := r.Get(ctx, vbpName, vbp); err != nil {
            return ctrl.Result{}, fmt.Errorf("failed to load VsphereBlueprint: %w", err)
        }

        // B. Load Defaults (CPU/RAM sizing safety nets)
        defaults, err := vsphereTemplate.GetDefaults()
        if err != nil {
            return ctrl.Result{}, err
        }
        baseTemplate = vsphereTemplate.GetBaseValues()

        // C. Init Strategy
        // Note: vSphere typically uses the global 'cloudCredentialSecret' defined in InfraBlueprint
        // rather than minting dynamic tokens per cluster like Harvester does.
        credentialSecret = ibp.Spec.CloudCredentialSecret

        selectedStrategy = vsphere.NewStrategy(
            vbp,
            ibp.Spec.UserData,
            ibp.Spec.RancherURL,
            defaults,
        )

    default:
        return ctrl.Result{}, fmt.Errorf("unsupported provider kind: %s", ibp.Spec.ProviderRef.Kind)
    }

    // 6. Build Helm Values (Generic Engine)
    masterBuilder := builder.NewMasterBuilder(selectedStrategy, baseTemplate)

    values, err := masterBuilder.Build(ctx, cbp, credentialSecret)
    if err != nil {
        l.Error(err, "Failed to build helm values")
        r.updateStatus(ctx, cbp, "ConfigGenerationFailed", false)
        return ctrl.Result{}, nil // Fatal error, don't retry until config changes
    }

    // 7. Apply Helm Chart
    // We use the ChartConfig extracted by the MasterBuilder (from the YAML defaults)
    chartCfg := masterBuilder.GetChartConfig()

    helmConfig := helm.Config{
        Namespace:   cbp.Namespace,
        ReleaseName: cbp.Name, // We use the Cluster name as the Release name
        RepoURL:     chartCfg.Repo,
        ChartName:   chartCfg.Name,
        Version:     chartCfg.Version,
        Values:      values,
    }

    l.Info("Applying Helm Release", "Release", cbp.Name, "Chart", chartCfg.Name)
    if err := helm.Apply(helmConfig); err != nil {
        l.Error(err, "Helm Install/Upgrade failed")
        r.updateStatus(ctx, cbp, "HelmApplyFailed", false)
        return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
    }

    // 8. Success!
    r.updateStatus(ctx, cbp, "Deployed", true)
    return ctrl.Result{RequeueAfter: 10 * time.Minute}, nil // Re-sync periodically
}

func (r *ClusterBlueprintReconciler) handleDelete(ctx context.Context, cbp *rigv1.ClusterBlueprint) (ctrl.Result, error) {
    if controllerutil.ContainsFinalizer(cbp, rigFinalizer) {
        // 1. Uninstall Helm Release
        helmCfg := helm.Config{
            Namespace:   cbp.Namespace,
            ReleaseName: cbp.Name,
        }
        // Best effort uninstall
        if err := helm.Uninstall(helmCfg); err != nil {
            log.FromContext(ctx).Error(err, "Failed to uninstall helm release during cleanup")
        }

        // 2. Cleanup Identity (Harvester SA)
        // We need to look up IBP -> HBP again to know WHERE to clean up
        // This is a simplified lookup; in production we might need to handle missing IBP gracefully
        ibp := &rigv1.InfraBlueprint{}
        if err := r.Get(ctx, types.NamespacedName{Name: cbp.Spec.InfraBlueprintRef, Namespace: cbp.Namespace}, ibp); err == nil {
            if ibp.Spec.ProviderRef.Kind == "HarvesterBlueprint" {
                hbp := &rigv1.HarvesterBlueprint{}
                if err := r.Get(ctx, types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace}, hbp); err == nil {
                    idMgr := harvester.NewIdentityManager(r.Client, r.Scheme)
                    idMgr.Cleanup(ctx, cbp, ibp, hbp)
                }
            }
        }

        // 3. Remove Finalizer
        controllerutil.RemoveFinalizer(cbp, rigFinalizer)
        if err := r.Update(ctx, cbp); err != nil {
            return ctrl.Result{}, err
        }
    }
    return ctrl.Result{}, nil
}
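
// updateStatus is best-effort: a failed status write is logged but not
// returned, so it never aborts a reconcile on its own.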
func (r *ClusterBlueprintReconciler) updateStatus(ctx context.Context, cbp *rigv1.ClusterBlueprint, phase string, ready bool) {
    cbp.Status.Phase = phase
    cbp.Status.Ready = ready
    if err := r.Status().Update(ctx, cbp); err != nil {
        log.FromContext(ctx).Error(err, "Failed to update status")
    }
}

// SetupWithManager sets up the controller with the Manager.
func (r *ClusterBlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&rigv1.ClusterBlueprint{}).
        Complete(r)
}

// Helper function to calculate required resources vs available
func (r *ClusterBlueprintReconciler) checkQuota(cbp *rigv1.ClusterBlueprint, ibp *rigv1.InfraBlueprint) error {
    // 1. Calculate what this cluster needs
    var reqCpu, reqMem, reqDisk int

    // Control Plane Sizing (Using safe defaults or template logic)
    // Ideally, this should match the defaults in your template/strategy
    cpCount := 1
    if cbp.Spec.ControlPlaneHA {
        cpCount = 3
    }
    reqCpu += cpCount * 4
    reqMem += cpCount * 8
    reqDisk += cpCount * 40
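    // (Illustrative arithmetic: with ControlPlaneHA set this reserves 3*4=12 CPU
    // cores, 3*8=24 GB of memory and 3*40=120 GB of disk before any worker pools.)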

    // Worker Pools Sizing
    for _, pool := range cbp.Spec.WorkerPools {
        reqCpu += pool.Quantity * pool.CpuCores
        reqMem += pool.Quantity * pool.MemoryGB
        reqDisk += pool.Quantity * pool.DiskGB
    }

    // 2. Check against Limits
    // Note: We use the Status.Usage which is calculated by the InfraController.
    // This includes "other" clusters, but might include "this" cluster if it was already counted.
    // For strict "Admission Control", usually we check:
    // (CurrentUsage + Request) > MaxLimit

    // However, since InfraController runs asynchronously, 'Status.Usage' might NOT yet include this new cluster.
    // So (Usage + Request) > Max is the safest check for a new provisioning.

    q := ibp.Spec.Quota
    u := ibp.Status.Usage

    if q.MaxCPU > 0 && (u.UsedCPU+reqCpu) > q.MaxCPU {
        return fmt.Errorf("requested CPU %d exceeds remaining quota (Max: %d, Used: %d)", reqCpu, q.MaxCPU, u.UsedCPU)
    }

    if q.MaxMemoryGB > 0 && (u.UsedMemoryGB+reqMem) > q.MaxMemoryGB {
        return fmt.Errorf("requested Mem %dGB exceeds remaining quota (Max: %d, Used: %d)", reqMem, q.MaxMemoryGB, u.UsedMemoryGB)
    }

    if q.MaxDiskGB > 0 && (u.UsedDiskGB+reqDisk) > q.MaxDiskGB {
        return fmt.Errorf("requested Disk %dGB exceeds remaining quota (Max: %d, Used: %d)", reqDisk, q.MaxDiskGB, u.UsedDiskGB)
    }

    return nil
}
@@ -0,0 +1,84 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
    "context"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

var _ = Describe("ClusterBlueprint Controller", func() {
    Context("When reconciling a resource", func() {
        const resourceName = "test-resource"

        ctx := context.Background()

        typeNamespacedName := types.NamespacedName{
            Name:      resourceName,
            Namespace: "default", // TODO(user): Modify as needed
        }
        clusterblueprint := &rigv1alpha1.ClusterBlueprint{}

        BeforeEach(func() {
            By("creating the custom resource for the Kind ClusterBlueprint")
            err := k8sClient.Get(ctx, typeNamespacedName, clusterblueprint)
            if err != nil && errors.IsNotFound(err) {
                resource := &rigv1alpha1.ClusterBlueprint{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      resourceName,
                        Namespace: "default",
                    },
                    // TODO(user): Specify other spec details if needed.
                }
                Expect(k8sClient.Create(ctx, resource)).To(Succeed())
            }
        })

        AfterEach(func() {
            // TODO(user): Cleanup logic after each test, like removing the resource instance.
            resource := &rigv1alpha1.ClusterBlueprint{}
            err := k8sClient.Get(ctx, typeNamespacedName, resource)
            Expect(err).NotTo(HaveOccurred())

            By("Cleanup the specific resource instance ClusterBlueprint")
            Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
        })
        It("should successfully reconcile the resource", func() {
            By("Reconciling the created resource")
            controllerReconciler := &ClusterBlueprintReconciler{
                Client: k8sClient,
                Scheme: k8sClient.Scheme(),
            }

            _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
                NamespacedName: typeNamespacedName,
            })
            Expect(err).NotTo(HaveOccurred())
            // TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
            // Example: If you expect a certain status condition after reconciliation, verify it here.
        })
    })
})
@@ -0,0 +1,128 @@
package controller

import (
    "context"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

// InfraBlueprintReconciler reconciles an InfraBlueprint object
type InfraBlueprintReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints,verbs=get;list;watch

func (r *InfraBlueprintReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    l := log.FromContext(ctx)

    // 1. Fetch the InfraBlueprint
    infra := &rigv1.InfraBlueprint{}
    if err := r.Get(ctx, req.NamespacedName, infra); err != nil {
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    // 2. List ALL ClusterBlueprints in the same namespace
    // (We assume Infra and Clusters live in the same namespace for security/tenancy)
    var clusterList rigv1.ClusterBlueprintList
    if err := r.List(ctx, &clusterList, client.InNamespace(req.Namespace)); err != nil {
        l.Error(err, "Failed to list clusters for quota calculation")
        return ctrl.Result{}, err
    }

    // 3. Calculate Usage (The Accountant Logic)
    var usedCpu, usedMem, usedDisk int

    for _, cluster := range clusterList.Items {
        // Only count clusters that belong to THIS Infra
        if cluster.Spec.InfraBlueprintRef != infra.Name {
            continue
        }

        // Sum Control Plane
        if cluster.Spec.ControlPlaneHA {
            // Hardcoded fallback or we could duplicate the defaults logic here.
            // Ideally, we'd read the templates, but for accounting, safe estimates are usually okay.
            // Or better: The Cluster status could report its own "ResourcesConsumed".
            // For now, we use the standard defaults we know:
            usedCpu += 3 * 4   // 3 nodes * 4 cores
            usedMem += 3 * 8   // 3 nodes * 8 GB
            usedDisk += 3 * 40 // 3 nodes * 40 GB
        } else {
            usedCpu += 1 * 4
            usedMem += 1 * 8
            usedDisk += 1 * 40
        }
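        // (These per-node estimates mirror the control-plane constants in
        // checkQuota; if the defaults change in one place, update the other too.)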

        // Sum Worker Pools
        for _, pool := range cluster.Spec.WorkerPools {
            usedCpu += pool.Quantity * pool.CpuCores
            usedMem += pool.Quantity * pool.MemoryGB
            usedDisk += pool.Quantity * pool.DiskGB
        }
    }

    // 4. Update Status if changed
    if infra.Status.Usage.UsedCPU != usedCpu ||
        infra.Status.Usage.UsedMemoryGB != usedMem ||
        infra.Status.Usage.UsedDiskGB != usedDisk {

        infra.Status.Usage.UsedCPU = usedCpu
        infra.Status.Usage.UsedMemoryGB = usedMem
        infra.Status.Usage.UsedDiskGB = usedDisk

        l.Info("Updating Infra Quota Usage", "Infra", infra.Name, "CPU", usedCpu, "Mem", usedMem)
        if err := r.Status().Update(ctx, infra); err != nil {
            return ctrl.Result{}, err
        }
    }

    // 5. Verify Connectivity (Optional)
    // We could check if the ProviderRef exists here and set Ready=true

    return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *InfraBlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&rigv1.InfraBlueprint{}).
        // Watch ClusterBlueprints too!
        // If a Cluster is added/modified, we need to Reconcile the Infra it points to.
        Watches(
            &rigv1.ClusterBlueprint{},
            handler.EnqueueRequestsFromMapFunc(r.findInfraForCluster),
        ).
        Complete(r)
}

// findInfraForCluster maps a Cluster change event to a Reconcile request for its parent Infra
func (r *InfraBlueprintReconciler) findInfraForCluster(ctx context.Context, obj client.Object) []reconcile.Request {
    cluster, ok := obj.(*rigv1.ClusterBlueprint)
    if !ok {
        return nil
    }

    if cluster.Spec.InfraBlueprintRef != "" {
        return []reconcile.Request{
            {
                NamespacedName: types.NamespacedName{
                    Name:      cluster.Spec.InfraBlueprintRef,
                    Namespace: cluster.Namespace,
                },
            },
        }
    }
    return nil
}
@@ -0,0 +1,84 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
    "context"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

var _ = Describe("InfraBlueprint Controller", func() {
    Context("When reconciling a resource", func() {
        const resourceName = "test-resource"

        ctx := context.Background()

        typeNamespacedName := types.NamespacedName{
            Name:      resourceName,
            Namespace: "default", // TODO(user): Modify as needed
        }
        infrablueprint := &rigv1alpha1.InfraBlueprint{}

        BeforeEach(func() {
            By("creating the custom resource for the Kind InfraBlueprint")
            err := k8sClient.Get(ctx, typeNamespacedName, infrablueprint)
            if err != nil && errors.IsNotFound(err) {
                resource := &rigv1alpha1.InfraBlueprint{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      resourceName,
                        Namespace: "default",
                    },
                    // TODO(user): Specify other spec details if needed.
                }
                Expect(k8sClient.Create(ctx, resource)).To(Succeed())
            }
        })

        AfterEach(func() {
            // TODO(user): Cleanup logic after each test, like removing the resource instance.
            resource := &rigv1alpha1.InfraBlueprint{}
            err := k8sClient.Get(ctx, typeNamespacedName, resource)
            Expect(err).NotTo(HaveOccurred())

            By("Cleanup the specific resource instance InfraBlueprint")
            Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
        })
        It("should successfully reconcile the resource", func() {
            By("Reconciling the created resource")
            controllerReconciler := &InfraBlueprintReconciler{
                Client: k8sClient,
                Scheme: k8sClient.Scheme(),
            }

            _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
                NamespacedName: typeNamespacedName,
            })
            Expect(err).NotTo(HaveOccurred())
            // TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
            // Example: If you expect a certain status condition after reconciliation, verify it here.
        })
    })
})
116
deploy/rig-operator/internal/controller/suite_test.go
Normal file
@@ -0,0 +1,116 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
	// +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var (
	ctx       context.Context
	cancel    context.CancelFunc
	testEnv   *envtest.Environment
	cfg       *rest.Config
	k8sClient client.Client
)

func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	ctx, cancel = context.WithCancel(context.TODO())

	var err error
	err = rigv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	// +kubebuilder:scaffold:scheme

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}

	// Retrieve the first found binary directory to allow running tests from IDEs
	if getFirstFoundEnvTestBinaryDir() != "" {
		testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir()
	}

	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())
})

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	cancel()
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})

// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path.
// ENVTEST-based tests depend on specific binaries, usually located in paths set by
// controller-runtime. When running tests directly (e.g., via an IDE) without using
// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured.
//
// This function streamlines the process by finding the required binaries, similar to
// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are
// properly set up, run 'make setup-envtest' beforehand.
func getFirstFoundEnvTestBinaryDir() string {
	basePath := filepath.Join("..", "..", "bin", "k8s")
	entries, err := os.ReadDir(basePath)
	if err != nil {
		logf.Log.Error(err, "Failed to read directory", "path", basePath)
		return ""
	}
	for _, entry := range entries {
		if entry.IsDir() {
			return filepath.Join(basePath, entry.Name())
		}
	}
	return ""
}
126
deploy/rig-operator/internal/helm/client.go
Normal file
@@ -0,0 +1,126 @@
package helm

import (
	"errors"
	"fmt"
	"log"
	"os"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/cli"
	"helm.sh/helm/v3/pkg/registry" // [NEW] Required for OCI
	"helm.sh/helm/v3/pkg/storage/driver"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

type Config struct {
	Namespace   string
	ReleaseName string
	RepoURL     string
	ChartName   string
	Version     string
	Values      map[string]interface{}
}

func Apply(cfg Config) error {
	settings := cli.New()

	// 1. Initialize Action Config
	actionConfig := new(action.Configuration)
	getter := genericclioptions.NewConfigFlags(false)

	if err := actionConfig.Init(getter, cfg.Namespace, os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
		return fmt.Errorf("failed to init helm config: %w", err)
	}

	// 2. [NEW] Initialize OCI Registry Client
	// This tells Helm how to talk to ghcr.io, docker.io, etc.
	registryClient, err := registry.NewClient(
		registry.ClientOptDebug(true),
		registry.ClientOptEnableCache(true),
		registry.ClientOptCredentialsFile(settings.RegistryConfig), // Uses ~/.config/helm/registry/config.json
	)
	if err != nil {
		return fmt.Errorf("failed to init registry client: %w", err)
	}
	actionConfig.RegistryClient = registryClient

	// 3. Setup Install Action
	client := action.NewInstall(actionConfig)
	client.Version = cfg.Version
	client.Namespace = cfg.Namespace
	client.ReleaseName = cfg.ReleaseName
	client.CreateNamespace = true

	if cfg.RepoURL != "" {
		client.RepoURL = cfg.RepoURL
	}

	// 4. Locate Chart (now supports oci:// because RegistryClient is set)
	cp, err := client.ChartPathOptions.LocateChart(cfg.ChartName, settings)
	if err != nil {
		return fmt.Errorf("failed to locate chart %s: %w", cfg.ChartName, err)
	}

	chart, err := loader.Load(cp)
	if err != nil {
		return fmt.Errorf("failed to load chart: %w", err)
	}

	// 5. Install or Upgrade (errors.Is handles a possibly wrapped sentinel error)
	histClient := action.NewHistory(actionConfig)
	histClient.Max = 1

	if _, err := histClient.Run(cfg.ReleaseName); errors.Is(err, driver.ErrReleaseNotFound) {
		fmt.Printf("Installing OCI Release %s...\n", cfg.ReleaseName)
		_, err := client.Run(chart, cfg.Values)
		return err
	} else if err != nil {
		return err
	}

	fmt.Printf("Upgrading OCI Release %s...\n", cfg.ReleaseName)
	upgrade := action.NewUpgrade(actionConfig)
	upgrade.Version = cfg.Version
	upgrade.Namespace = cfg.Namespace
	// Important: Upgrade also needs the RegistryClient, but it shares 'actionConfig'
	// so it is already set up.
	if cfg.RepoURL != "" {
		upgrade.RepoURL = cfg.RepoURL
	}
	_, err = upgrade.Run(cfg.ReleaseName, chart, cfg.Values)
	return err
}

func Uninstall(cfg Config) error {
	settings := cli.New()

	// 1. Initialize Action Config (same as Apply)
	actionConfig := new(action.Configuration)
	getter := genericclioptions.NewConfigFlags(false)
	if err := actionConfig.Init(getter, cfg.Namespace, os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
		return fmt.Errorf("failed to init helm config: %w", err)
	}

	// 2. Initialize OCI Registry Client (crucial for OCI charts)
	registryClient, err := registry.NewClient(
		registry.ClientOptDebug(true),
		registry.ClientOptEnableCache(true),
		registry.ClientOptCredentialsFile(settings.RegistryConfig),
	)
	if err != nil {
		return fmt.Errorf("failed to init registry client: %w", err)
	}
	actionConfig.RegistryClient = registryClient

	// 3. Run Uninstall
	client := action.NewUninstall(actionConfig)
	// Don't fail if it's already gone
	_, err = client.Run(cfg.ReleaseName)
	if err != nil && !errors.Is(err, driver.ErrReleaseNotFound) {
		return fmt.Errorf("failed to uninstall release: %w", err)
	}

	fmt.Printf("✅ Uninstalled Release %s\n", cfg.ReleaseName)
	return nil
}
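A minimal call-site sketch for Apply (not in this commit; release and namespace names are placeholders, the chart reference matches the _defaults.helmChart entries in the embedded templates below):

err := helm.Apply(helm.Config{
	Namespace:   "fleet-default", // placeholder
	ReleaseName: "demo-cluster",  // placeholder
	ChartName:   "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates",
	Version:     "0.7.2",
	Values:      map[string]interface{}{"cluster": map[string]interface{}{"name": "demo-cluster"}},
})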
176
deploy/rig-operator/internal/provider/harvester/credential.go
Normal file
@@ -0,0 +1,176 @@
package harvester

import (
	"context"
	"encoding/base64"
	"fmt"
	"time"

	authenticationv1 "k8s.io/api/authentication/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// DeleteCredentialResources connects to Harvester and removes the specific SA and bindings
func DeleteCredentialResources(ctx context.Context, masterKubeconfig []byte, serviceAccountName, vmNamespace string) error {
	restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig)
	if err != nil {
		return err
	}
	hvClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return err
	}

	deletePolicy := metav1.DeletePropagationBackground
	deleteOpts := metav1.DeleteOptions{PropagationPolicy: &deletePolicy}

	// 1. Delete Global CSI Binding (ClusterRoleBinding)
	csiBindingName := fmt.Sprintf("%s-csi-binding", serviceAccountName)
	// We ignore NotFound errors to make this idempotent
	if err := hvClient.RbacV1().ClusterRoleBindings().Delete(ctx, csiBindingName, deleteOpts); err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	// 2. Delete Cloud Provider Binding (RoleBinding in VM Namespace)
	cpBindingName := fmt.Sprintf("%s-cloud-binding", serviceAccountName)
	if err := hvClient.RbacV1().RoleBindings(vmNamespace).Delete(ctx, cpBindingName, deleteOpts); err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	// 3. Delete ServiceAccount (VM Namespace)
	if err := hvClient.CoreV1().ServiceAccounts(vmNamespace).Delete(ctx, serviceAccountName, deleteOpts); err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	return nil
}

// EnsureCredential mints a dedicated ServiceAccount in the specific VM Namespace
func EnsureCredential(ctx context.Context, masterKubeconfig []byte, clusterName, targetNamespace, vmNamespace, harvesterURL string) (*corev1.Secret, string, time.Time, error) {

	// --- PHASE 1: Connect ---
	restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig)
	if err != nil {
		return nil, "", time.Time{}, fmt.Errorf("invalid rancher cloud credential kubeconfig: %w", err)
	}
	hvClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return nil, "", time.Time{}, err
	}

	// --- PHASE 2: Create Identity ---
	if vmNamespace == "" {
		vmNamespace = "default"
	}
	saName := fmt.Sprintf("prov-%s", clusterName)

	// A. Create ServiceAccount
	sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: vmNamespace}}
	if _, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return nil, "", time.Time{}, err
		}
	}

	// B. Create RoleBinding (Cloud Provider)
	rb := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: saName + "-cloud-binding", Namespace: vmNamespace},
		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}},
		RoleRef:    rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:cloudprovider", APIGroup: "rbac.authorization.k8s.io"},
	}
	// AlreadyExists is fine (idempotent re-run); any other error is fatal
	if _, err := hvClient.RbacV1().RoleBindings(vmNamespace).Create(ctx, rb, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		return nil, "", time.Time{}, err
	}

	// C. Create ClusterRoleBinding (CSI Driver)
	crb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: saName + "-csi-binding"},
		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}},
		RoleRef:    rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:csi-driver", APIGroup: "rbac.authorization.k8s.io"},
	}
	if _, err := hvClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		return nil, "", time.Time{}, err
	}

	// D. Mint Token
	ttlSeconds := int64(315360000) // ~10 years
	tokenRequest, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).CreateToken(ctx, saName, &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{ExpirationSeconds: &ttlSeconds},
	}, metav1.CreateOptions{})
	if err != nil {
		return nil, "", time.Time{}, fmt.Errorf("failed to mint harvester token: %w", err)
	}
	expiryTime := time.Now().Add(time.Duration(ttlSeconds) * time.Second)

	// --- PHASE 3: Determine URL & CA ---
	if harvesterURL == "" {
		harvesterURL = restConfig.Host
	}

	// Fetch internal CA (required because proxy CA != internal CA)
	harvesterCA := restConfig.CAData
	caConfigMap, err := hvClient.CoreV1().ConfigMaps("default").Get(ctx, "kube-root-ca.crt", metav1.GetOptions{})
	if err == nil {
		if caStr, ok := caConfigMap.Data["ca.crt"]; ok {
			harvesterCA = []byte(caStr)
		}
	}

	// --- PHASE 4: Construct Kubeconfig ---
	caData := base64.StdEncoding.EncodeToString(harvesterCA)
	token := tokenRequest.Status.Token

	newKubeconfig := fmt.Sprintf(
		`apiVersion: v1
kind: Config
clusters:
- name: harvester
  cluster:
    server: %s
    certificate-authority-data: %s
users:
- name: provisioner
  user:
    token: %s
contexts:
- name: default
  context:
    cluster: harvester
    user: provisioner
    namespace: %s
current-context: default
`, harvesterURL, caData, token, vmNamespace)

	// --- PHASE 5: Create Secret Object ---
	secretName := fmt.Sprintf("harvesterconfig-%s", clusterName)

	secret := &corev1.Secret{
		TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: targetNamespace,
			Annotations: map[string]string{
				// [CRITICAL] These annotations authorize the guest cluster to use this secret
				"v2prov-secret-authorized-for-cluster":                clusterName,
				"v2prov-authorized-secret-deletes-on-cluster-removal": "true",
			},
			Labels: map[string]string{
				"cattle.io/creator":       "rig-operator", // Updated creator
				"rig.appstack.io/cluster": clusterName,
			},
		},
		Type: "Opaque",
		StringData: map[string]string{
			"credential": newKubeconfig,
		},
	}

	return secret, saName, expiryTime, nil
}
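Because the kubeconfig above is assembled by string formatting, a cheap extra guard (a sketch, not in this file) is to round-trip it through clientcmd before returning, using the same helper already imported here:

// Sketch: validate the minted kubeconfig before persisting it.
if _, err := clientcmd.RESTConfigFromKubeConfig([]byte(newKubeconfig)); err != nil {
	return nil, "", time.Time{}, fmt.Errorf("generated kubeconfig is invalid: %w", err)
}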
126
deploy/rig-operator/internal/provider/harvester/manager.go
Normal file
@@ -0,0 +1,126 @@
package harvester

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/log"

	"vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

type IdentityManager struct {
	client client.Client
	scheme *runtime.Scheme
}

func NewIdentityManager(c client.Client, s *runtime.Scheme) *IdentityManager {
	return &IdentityManager{client: c, scheme: s}
}

// Ensure checks if an identity exists. If not, it fetches master creds, mints a new one, and updates Status.
func (m *IdentityManager) Ensure(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, hbp *v1alpha1.HarvesterBlueprint) (string, error) {
	l := log.FromContext(ctx)

	// 1. Fast path: if an identity already exists in Status, return it
	if cbp.Status.Identity != nil && cbp.Status.Identity.SecretRef != "" {
		return cbp.Status.Identity.SecretRef, nil
	}

	l.Info("Minting Harvester identity", "Cluster", cbp.Name)

	// 2. Fetch the master credential (from the InfraBlueprint)
	rancherCredName := ibp.Spec.CloudCredentialSecret
	if rancherCredName == "" {
		return "", fmt.Errorf("CloudCredentialSecret is missing in InfraBlueprint %s", ibp.Name)
	}

	var rancherSecret corev1.Secret
	// Note: Rancher secrets are expected in cattle-global-data
	if err := m.client.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil {
		return "", fmt.Errorf("failed to fetch rancher credential %s: %w", rancherCredName, err)
	}

	// 3. Extract the kubeconfig
	const kubeconfigKey = "harvestercredentialConfig-kubeconfigContent"
	adminKubeconfigBytes := rancherSecret.Data[kubeconfigKey]
	if len(adminKubeconfigBytes) == 0 {
		if len(rancherSecret.Data["credential"]) > 0 {
			adminKubeconfigBytes = rancherSecret.Data["credential"]
		} else {
			return "", fmt.Errorf("secret %s missing kubeconfig data", rancherCredName)
		}
	}

	// 4. Call the low-level factory
	newSecret, saName, _, err := EnsureCredential(
		ctx,
		adminKubeconfigBytes,
		cbp.Name,
		cbp.Namespace,         // Target namespace (where the secret goes)
		hbp.Spec.VmNamespace,  // Harvester namespace (where the VM goes)
		hbp.Spec.HarvesterURL, // Explicit URL from the HBP
	)
	if err != nil {
		return "", fmt.Errorf("failed to mint harvester credential: %w", err)
	}

	// 5. Persist the Secret.
	// Set an OwnerRef so the Secret is deleted automatically when the CBP is deleted.
	if err := controllerutil.SetControllerReference(cbp, newSecret, m.scheme); err != nil {
		return "", err
	}

	patchOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("rig-operator")}
	if err := m.client.Patch(ctx, newSecret, client.Apply, patchOpts...); err != nil {
		return "", fmt.Errorf("failed to patch new secret: %w", err)
	}

	// 6. Update the CBP Status.
	// We do this here so the identity is "locked" to the object immediately.
	if cbp.Status.Identity == nil {
		cbp.Status.Identity = &v1alpha1.IdentityStatus{}
	}
	cbp.Status.Identity.SecretRef = newSecret.Name
	cbp.Status.Identity.ServiceAccount = saName

	if err := m.client.Status().Update(ctx, cbp); err != nil {
		return "", fmt.Errorf("failed to update cluster status: %w", err)
	}

	return newSecret.Name, nil
}

// Cleanup removes the ServiceAccount from Harvester when the Cluster is deleted
func (m *IdentityManager) Cleanup(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, hbp *v1alpha1.HarvesterBlueprint) {
	if cbp.Status.Identity == nil || cbp.Status.Identity.ServiceAccount == "" {
		return
	}

	// Fetch the master secret again to get connection details
	rancherCredName := ibp.Spec.CloudCredentialSecret
	var rancherSecret corev1.Secret
	if err := m.client.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil {
		log.FromContext(ctx).V(1).Info("Cleanup: could not fetch master secret (connection lost), skipping manual cleanup")
		return
	}

	var kubeBytes []byte
	if len(rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]) > 0 {
		kubeBytes = rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]
	} else if len(rancherSecret.Data["credential"]) > 0 {
		kubeBytes = rancherSecret.Data["credential"]
	} else {
		return
	}

	// Delegate to the low-level cleanup
	if err := DeleteCredentialResources(ctx, kubeBytes, cbp.Status.Identity.ServiceAccount, hbp.Spec.VmNamespace); err != nil {
		log.FromContext(ctx).Error(err, "Failed to cleanup Harvester resources (best effort)")
	}
}
140
deploy/rig-operator/internal/provider/harvester/strategy.go
Normal file
@@ -0,0 +1,140 @@
package harvester

import (
	"context"
	"fmt"

	"vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
	template "vanderlande.com/ittp/appstack/rig-operator/internal/templates/harvester"
)

// harvesterNodePool matches the exact JSON structure required by the Helm chart
type harvesterNodePool struct {
	Name         string `json:"name"`
	DisplayName  string `json:"displayName"`
	Quantity     int    `json:"quantity"`
	Etcd         bool   `json:"etcd"`
	ControlPlane bool   `json:"controlplane"`
	Worker       bool   `json:"worker"`
	Paused       bool   `json:"paused"`

	// Harvester-specific fields
	CpuCount    int    `json:"cpuCount"`
	MemorySize  int    `json:"memorySize"` // GB
	DiskSize    int    `json:"diskSize"`   // GB
	ImageName   string `json:"imageName"`
	NetworkName string `json:"networkName"`
	SshUser     string `json:"sshUser"`
	VmNamespace string `json:"vmNamespace"`
	UserData    string `json:"userData"`
}

type Strategy struct {
	blueprint  *v1alpha1.HarvesterBlueprint
	userData   string
	rancherURL string
	defaults   template.Defaults
}

// NewStrategy initializes the strategy with defaults and optional overrides
func NewStrategy(hbp *v1alpha1.HarvesterBlueprint, infraUserData string, infraRancherURL string, defaults template.Defaults) *Strategy {
	// 1. Determine UserData priority: Infra (IBP) > Template Default
	finalUserData := infraUserData
	if finalUserData == "" {
		finalUserData = defaults.UserData
	}

	return &Strategy{
		blueprint:  hbp,
		userData:   finalUserData,
		rancherURL: infraRancherURL,
		defaults:   defaults,
	}
}

// GenerateNodePools implements provider.Strategy
func (s *Strategy) GenerateNodePools(ctx context.Context, cbp *v1alpha1.ClusterBlueprint) (interface{}, error) {
	var pools []interface{}

	// Helper to map a generic request onto the Harvester-specific struct
	mapPool := func(name string, qty, cpu, memGB, diskGB int, isEtcd, isCp, isWk bool) harvesterNodePool {
		return harvesterNodePool{
			Name:         name,
			DisplayName:  name,
			Quantity:     qty,
			Etcd:         isEtcd,
			ControlPlane: isCp,
			Worker:       isWk,
			Paused:       false,

			// Mapping: generic (GB) -> Harvester (GB), no conversion needed
			CpuCount:   cpu,
			MemorySize: memGB,
			DiskSize:   diskGB,

			// Harvester specifics from the HBP
			ImageName:   s.blueprint.Spec.ImageName,
			NetworkName: s.blueprint.Spec.NetworkName,
			SshUser:     s.blueprint.Spec.SshUser,
			VmNamespace: s.blueprint.Spec.VmNamespace,
			UserData:    s.userData,
		}
	}

	// 1. Control plane pool
	cpQty := 1
	if cbp.Spec.ControlPlaneHA {
		cpQty = 3
	}

	// Use defaults from the embedded values.yaml for CP sizing
	pools = append(pools, mapPool(
		"cp-pool",
		cpQty,
		s.defaults.CP_CPU,
		s.defaults.CP_Mem,
		s.defaults.CP_Disk,
		true, true, false,
	))

	// 2. Worker pools
	for _, wp := range cbp.Spec.WorkerPools {
		pools = append(pools, mapPool(
			wp.Name,
			wp.Quantity,
			wp.CpuCores,
			wp.MemoryGB,
			wp.DiskGB,
			false, false, true,
		))
	}

	return pools, nil
}

// GetGlobalOverrides implements provider.Strategy
func (s *Strategy) GetGlobalOverrides(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecretName string) (map[string]interface{}, error) {
	// secret://<namespace>:<secretName>
	secretURI := fmt.Sprintf("secret://%s:%s", cbp.Namespace, credentialSecretName)

	overrides := map[string]interface{}{
		"cloud_provider_name":   "harvester",
		"cloud_provider_config": secretURI,
		// Inject the Rancher URL
		"rancher": map[string]interface{}{
			"cattle": map[string]interface{}{
				"url": s.rancherURL,
			},
		},

		"chartValues": map[string]interface{}{
			"harvester-cloud-provider": map[string]interface{}{
				"global": map[string]interface{}{
					"cattle": map[string]interface{}{
						"clusterName": cbp.Name,
					},
				},
			},
		},
	}
	return overrides, nil
}
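A hedged sketch of how a reconciler might wire these pieces together (the ibp.Spec.UserData and ibp.Spec.RancherURL field names are assumptions; substitute the actual InfraBlueprint fields):

defaults, err := template.GetDefaults() // fallbacks from the embedded values.yaml
if err != nil {
	return nil, err
}
strategy := NewStrategy(hbp, ibp.Spec.UserData, ibp.Spec.RancherURL, defaults) // assumed field names
pools, err := strategy.GenerateNodePools(ctx, cbp)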
16
deploy/rig-operator/internal/provider/interface.go
Normal file
@@ -0,0 +1,16 @@
package provider

import (
	"context"

	"vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

type Strategy interface {
	// GenerateNodePools generates the provider-specific node pool list.
	// [CHANGED] Return type is now interface{} to support both structs and maps
	GenerateNodePools(ctx context.Context, cbp *v1alpha1.ClusterBlueprint) (interface{}, error)

	// GetGlobalOverrides returns the provider-specific helm values.
	GetGlobalOverrides(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecret string) (map[string]interface{}, error)
}
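Since GenerateNodePools now returns interface{}, a compile-time assertion in each provider package is a cheap way to catch interface drift; a suggested guard, not part of this commit:

// In package harvester (and likewise in vsphere):
var _ provider.Strategy = (*Strategy)(nil) // compile error if Strategy stops satisfying the interface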
138
deploy/rig-operator/internal/provider/vsphere/strategy.go
Normal file
@@ -0,0 +1,138 @@
package vsphere

import (
	"context"

	rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
	vspheretpl "vanderlande.com/ittp/appstack/rig-operator/internal/templates/vsphere"
)

type Strategy struct {
	blueprint  *rigv1.VsphereBlueprint
	userData   string
	rancherURL string
	defaults   vspheretpl.Defaults
}

// NewStrategy creates the vSphere logic handler
func NewStrategy(vbp *rigv1.VsphereBlueprint, userData string, rancherURL string, defaults vspheretpl.Defaults) *Strategy {
	// 1. Resolve UserData (Infra > Template Default)
	finalUserData := userData
	if finalUserData == "" {
		finalUserData = defaults.UserData
	}

	return &Strategy{
		blueprint:  vbp,
		userData:   finalUserData,
		rancherURL: rancherURL,
		defaults:   defaults,
	}
}

// GenerateNodePools maps the generic ClusterBlueprint to vSphere-specific node pool maps
func (s *Strategy) GenerateNodePools(ctx context.Context, cbp *rigv1.ClusterBlueprint) (interface{}, error) {
	var nodePools []map[string]interface{}

	// 1. Control plane node pool.
	// We rely on the defaults extracted from values.yaml (e.g. 4 cores, 8 GB).
	// The vSphere chart expects MB, so we multiply GB * 1024.
	cpQty := 1
	if cbp.Spec.ControlPlaneHA {
		cpQty = 3
	}

	nodePools = append(nodePools, s.buildPool(
		"control-plane-nodes",   // Name
		"cp-nodes",              // Display name
		cpQty,                   // Quantity
		s.defaults.CP_CPU,       // Cores
		s.defaults.CP_Mem*1024,  // RAM (GB -> MB)
		s.defaults.CP_Disk*1024, // Disk (GB -> MB)
		true,  // Etcd
		true,  // ControlPlane
		false, // Worker
	))

	// 2. Worker pools: iterate over the user's requested pools in the CBP
	for _, wp := range cbp.Spec.WorkerPools {
		nodePools = append(nodePools, s.buildPool(
			wp.Name,
			wp.Name,
			wp.Quantity,
			wp.CpuCores,
			wp.MemoryGB*1024, // Convert GB to MB
			wp.DiskGB*1024,   // Convert GB to MB
			false, // Etcd
			false, // ControlPlane
			true,  // Worker
		))
	}

	return nodePools, nil
}

// GetGlobalOverrides injects the vSphere-specific global values (cloud provider, credentials, URLs)
func (s *Strategy) GetGlobalOverrides(ctx context.Context, cbp *rigv1.ClusterBlueprint, credentialSecret string) (map[string]interface{}, error) {
	overrides := map[string]interface{}{
		// Tell Helm we are on vSphere
		"cloudprovider": "vsphere",

		// The Secret containing username/password/vcenter-address
		"cloudCredentialSecretName": credentialSecret,

		// Register with the correct Rancher Manager
		"rancher": map[string]interface{}{
			"cattle": map[string]interface{}{
				"url": s.rancherURL,
			},
		},

		// Cluster metadata
		"cluster": map[string]interface{}{
			"name": cbp.Name,
			"config": map[string]interface{}{
				"kubernetesVersion": cbp.Spec.KubernetesVersion,
			},
		},
	}

	return overrides, nil
}

// buildPool is a private helper to construct the exact map structure the vSphere Helm chart expects
func (s *Strategy) buildPool(name, displayName string, qty, cpu, ramMB, diskMB int, etcd, cp, worker bool) map[string]interface{} {
	pool := map[string]interface{}{
		// Generic RKE2 node settings
		"name":         name,
		"displayName":  displayName,
		"quantity":     qty,
		"etcd":         etcd,
		"controlplane": cp,
		"worker":       worker,
		"paused":       false,

		// vSphere infrastructure location (from the blueprint)
		"vcenter":          s.blueprint.Spec.VCenter,
		"datacenter":       s.blueprint.Spec.Datacenter,
		"folder":           s.blueprint.Spec.Folder,
		"pool":             s.blueprint.Spec.ResourcePool,
		"datastoreCluster": s.blueprint.Spec.Datastore, // Assumes the chart supports this key; if not, use "datastore".
		"network":          []string{s.blueprint.Spec.Network},

		// Cloning details
		"creationType": "template",
		"cloneFrom":    s.blueprint.Spec.Template,

		// Hardware sizing (already converted to MB)
		"cpuCount":   cpu,
		"memorySize": ramMB,
		"diskSize":   diskMB,

		// Cloud-init
		"cloudConfig": s.userData,
	}

	return pool
}
69
deploy/rig-operator/internal/templates/harvester/embed.go
Normal file
@@ -0,0 +1,69 @@
package harvester

import (
	_ "embed"
	"fmt"

	"gopkg.in/yaml.v3"
)

//go:embed values.yaml
var valuesYAML []byte

type Defaults struct {
	CP_CPU  int
	CP_Mem  int
	CP_Disk int

	ChartRepo    string
	ChartName    string
	ChartVersion string

	// [NEW] Default UserData for this provider
	UserData string
}

func GetDefaults() (Defaults, error) {
	var raw map[string]interface{}
	if err := yaml.Unmarshal(valuesYAML, &raw); err != nil {
		return Defaults{}, fmt.Errorf("failed to parse harvester base values: %w", err)
	}

	d := Defaults{
		CP_CPU: 4, CP_Mem: 8, CP_Disk: 40, // Safety fallbacks
	}

	if defs, ok := raw["_defaults"].(map[string]interface{}); ok {
		if cp, ok := defs["controlPlaneProfile"].(map[string]interface{}); ok {
			if v, ok := cp["cpuCores"].(int); ok {
				d.CP_CPU = v
			}
			if v, ok := cp["memoryGb"].(int); ok {
				d.CP_Mem = v
			}
			if v, ok := cp["diskGb"].(int); ok {
				d.CP_Disk = v
			}
		}
		if chart, ok := defs["helmChart"].(map[string]interface{}); ok {
			if v, ok := chart["repo"].(string); ok {
				d.ChartRepo = v
			}
			if v, ok := chart["name"].(string); ok {
				d.ChartName = v
			}
			if v, ok := chart["version"].(string); ok {
				d.ChartVersion = v
			}
		}
		// [NEW] Extract UserData
		if v, ok := defs["userData"].(string); ok {
			d.UserData = v
		}
	}
	return d, nil
}

func GetBaseValues() []byte {
	return valuesYAML
}
456
deploy/rig-operator/internal/templates/harvester/values.yaml
Normal file
@@ -0,0 +1,456 @@
# ----------------------------------------------------------------
# BASE TEMPLATE (internal/templates/base_values.yaml)
# ----------------------------------------------------------------

_defaults:
  helmChart:
    repo: ""
    name: "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates"
    version: "0.7.2"
  controlPlaneProfile:
    cpuCores: 4
    memoryGb: 8
    diskGb: 40
  userData: &userData |
    #cloud-config
    package_update: false
    package_upgrade: false
    snap:
      commands:
        00: snap refresh --hold=forever
    package_reboot_if_required: true
    packages:
      - qemu-guest-agent
      - yq
      - jq
      - curl
      - wget

    bootcmd:
      - sysctl -w net.ipv6.conf.all.disable_ipv6=1
      - sysctl -w net.ipv6.conf.default.disable_ipv6=1

    write_files:
      # ----------------------------------------------------------------
      # 1. CNI Permission Fix Script & Cron (CIS 1.1.9 Persistence)
      # ----------------------------------------------------------------
      - path: /usr/local/bin/fix-cni-perms.sh
        permissions: '0700'
        owner: root:root
        content: |
          #!/bin/bash
          # Wait 60s on boot for RKE2 to write files
          [ "$1" == "boot" ] && sleep 60

          # Enforce 600 on CNI files (CIS 1.1.9)
          if [ -d /etc/cni/net.d ]; then
            find /etc/cni/net.d -type f -exec chmod 600 {} \;
          fi
          if [ -d /var/lib/cni/networks ]; then
            find /var/lib/cni/networks -type f -exec chmod 600 {} \;
          fi

          # Every RKE2 service restart can reset CNI file permissions, so we run
          # this script on reboot and daily via cron to maintain CIS compliance.

      - path: /etc/cron.d/cis-cni-fix
        permissions: '0644'
        owner: root:root
        content: |
          # Run on reboot (with delay) to fix files created during startup
          @reboot root /usr/local/bin/fix-cni-perms.sh boot
          # Run once daily at 00:00 to correct any drift
          0 0 * * * root /usr/local/bin/fix-cni-perms.sh

      # ----------------------------------------------------------------
      # 2. RKE2 Admission Config
      # ----------------------------------------------------------------
      - path: /etc/rancher/rke2/rke2-admission.yaml
        permissions: '0600'
        owner: root:root
        content: |
          apiVersion: apiserver.config.k8s.io/v1
          kind: AdmissionConfiguration
          plugins:
            - name: PodSecurity
              configuration:
                apiVersion: pod-security.admission.config.k8s.io/v1beta1
                kind: PodSecurityConfiguration
                defaults:
                  enforce: "restricted"
                  enforce-version: "latest"
                  audit: "restricted"
                  audit-version: "latest"
                  warn: "restricted"
                  warn-version: "latest"
                exemptions:
                  usernames: []
                  runtimeClasses: []
                  namespaces: [compliance-operator-system, kube-system, cis-operator-system, tigera-operator, calico-system, rke2-ingress-nginx, cattle-system, cattle-fleet-system, longhorn-system, cattle-neuvector-system]
            - name: EventRateLimit
              configuration:
                apiVersion: eventratelimit.admission.k8s.io/v1alpha1
                kind: Configuration
                limits:
                  - type: Server
                    qps: 5000
                    burst: 20000

      # ----------------------------------------------------------------
      # 3. RKE2 Audit Policy
      # ----------------------------------------------------------------
      - path: /etc/rancher/rke2/audit-policy.yaml
        permissions: '0600'
        owner: root:root
        content: |
          apiVersion: audit.k8s.io/v1
          kind: Policy
          rules:
            - level: None
              users: ["system:kube-controller-manager", "system:kube-scheduler", "system:serviceaccount:kube-system:endpoint-controller"]
              verbs: ["get", "update"]
              resources:
                - group: ""
                  resources: ["endpoints", "services", "services/status"]
            - level: None
              verbs: ["get"]
              resources:
                - group: ""
                  resources: ["nodes", "nodes/status", "pods", "pods/status"]
            - level: None
              users: ["kube-proxy"]
              verbs: ["watch"]
              resources:
                - group: ""
                  resources: ["endpoints", "services", "services/status", "configmaps"]
            - level: Metadata
              resources:
                - group: ""
                  resources: ["secrets", "configmaps"]
            - level: RequestResponse
              omitStages:
                - RequestReceived

      # ----------------------------------------------------------------
      # 4. Static NetworkPolicies
      # ----------------------------------------------------------------
      - path: /var/lib/rancher/rke2/server/manifests/cis-network-policy.yaml
        permissions: '0600'
        owner: root:root
        content: |
          apiVersion: networking.k8s.io/v1
          kind: NetworkPolicy
          metadata:
            name: default-deny-ingress
            namespace: default
          spec:
            podSelector: {}
            policyTypes:
              - Ingress
          ---
          apiVersion: networking.k8s.io/v1
          kind: NetworkPolicy
          metadata:
            name: allow-all-metrics
            namespace: kube-public
          spec:
            podSelector: {}
            ingress:
              - {}
            policyTypes:
              - Ingress
          ---
          apiVersion: networking.k8s.io/v1
          kind: NetworkPolicy
          metadata:
            name: allow-all-system
            namespace: kube-system
          spec:
            podSelector: {}
            ingress:
              - {}
            policyTypes:
              - Ingress

      # ----------------------------------------------------------------
      # 5. Service Account Hardening
      # ----------------------------------------------------------------
      - path: /var/lib/rancher/rke2/server/manifests/cis-sa-config.yaml
        permissions: '0600'
        owner: root:root
        content: |
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: default
            namespace: default
          automountServiceAccountToken: false
          ---
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: default
            namespace: kube-system
          automountServiceAccountToken: false

      - path: /var/lib/rancher/rke2/server/manifests/cis-sa-cron.yaml
        permissions: '0600'
        owner: root:root
        content: |
          apiVersion: v1
          kind: ServiceAccount
          metadata: {name: sa-cleaner, namespace: kube-system}
          ---
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRole
          metadata: {name: sa-cleaner-role}
          rules:
            - apiGroups: [""]
              resources: ["namespaces", "serviceaccounts"]
              verbs: ["get", "list", "patch"]
          ---
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRoleBinding
          metadata: {name: sa-cleaner-binding}
          subjects: [{kind: ServiceAccount, name: sa-cleaner, namespace: kube-system}]
          roleRef: {kind: ClusterRole, name: sa-cleaner-role, apiGroup: rbac.authorization.k8s.io}
          ---
          apiVersion: batch/v1
          kind: CronJob
          metadata:
            name: sa-cleaner
            namespace: kube-system
          spec:
            schedule: "0 */6 * * *" # Run every 6 hours
            jobTemplate:
              spec:
                template:
                  spec:
                    serviceAccountName: sa-cleaner
                    containers:
                      - name: cleaner
                        image: rancher/kubectl:v1.26.0
                        command:
                          - /bin/bash
                          - -c
                          - |
                            # Get all namespaces
                            for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do
                              # Check if default SA has automount=true (or null)
                              automount=$(kubectl get sa default -n $ns -o jsonpath='{.automountServiceAccountToken}')
                              if [ "$automount" != "false" ]; then
                                echo "Securing default SA in namespace: $ns"
                                kubectl patch sa default -n $ns -p '{"automountServiceAccountToken": false}'
                              fi
                            done
                    restartPolicy: OnFailure

      # ----------------------------------------------------------------
      # 6. OS Sysctls Hardening
      # ----------------------------------------------------------------
      - path: /etc/sysctl.d/60-rke2-cis.conf
        permissions: '0644'
        content: |
          vm.overcommit_memory=1
          vm.max_map_count=65530
          vm.panic_on_oom=0
          fs.inotify.max_user_watches=1048576
          fs.inotify.max_user_instances=8192
          kernel.panic=10
          kernel.panic_on_oops=1
          net.ipv4.conf.all.rp_filter=1
          net.ipv4.conf.default.rp_filter=1
          net.ipv4.conf.all.accept_source_route=0
          net.ipv4.conf.default.accept_source_route=0
          net.ipv4.conf.all.accept_redirects=0
          net.ipv4.conf.default.accept_redirects=0
          net.ipv4.conf.all.send_redirects=0
          net.ipv4.conf.default.send_redirects=0
          net.ipv4.conf.all.log_martians=1
          net.ipv4.conf.default.log_martians=1
          net.ipv4.icmp_echo_ignore_broadcasts=1
          net.ipv4.icmp_ignore_bogus_error_responses=1
          net.ipv6.conf.all.disable_ipv6=1
          net.ipv6.conf.default.disable_ipv6=1
          fs.protected_hardlinks=1
          fs.protected_symlinks=1

      # ----------------------------------------------------------------
      # 7. Environment & Setup Scripts
      # ----------------------------------------------------------------
      - path: /etc/profile.d/rke2.sh
        permissions: '0644'
        content: |
          export PATH=$PATH:/var/lib/rancher/rke2/bin:/opt/rke2/bin
          export KUBECONFIG=/etc/rancher/rke2/rke2.yaml

      - path: /root/updates.sh
        permissions: '0550'
        content: |
          #!/bin/bash
          export DEBIAN_FRONTEND=noninteractive
          apt-mark hold linux-headers-generic
          apt-mark hold linux-headers-virtual
          apt-mark hold linux-image-virtual
          apt-mark hold linux-virtual
          apt-get update
          apt-get upgrade -y
          apt-get autoremove -y

    users:
      - name: rancher
        gecos: Rancher service account
        hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3.
        lock_passwd: false
        shell: /bin/bash
        groups: [users, sudo, docker]
        sudo: ALL=(ALL:ALL) ALL
        ssh_authorized_keys:
          - 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s'

      - name: etcd
        gecos: "etcd user"
        shell: /sbin/nologin
        system: true
        lock_passwd: true

    disable_root: true
    ssh_pwauth: true

    runcmd:
      - systemctl enable --now qemu-guest-agent
      - sysctl --system
      - /root/updates.sh
      # Immediate run of fix script
      - /usr/local/bin/fix-cni-perms.sh

    final_message: |
      VI_CNV_CLOUD_INIT has been applied successfully.
      Node ready for Rancher!

# amazonec2, azure, digitalocean, harvester, vsphere, custom
cloudprovider: harvester

# cloud provider credentials
cloudCredentialSecretName: cc-mrklm

# rancher manager url
rancher:
  cattle:
    url: rancher-mgmt.product.lan

# cluster values
cluster:
  name: default-cluster
  # labels:
  #   key: value
  config:
    kubernetesVersion: v1.33.5+rke2r1
    enableNetworkPolicy: true
    localClusterAuthEndpoint:
      enabled: false
    chartValues:
      harvester-cloud-provider:
        global:
          cattle:
            clusterName: default-cluster

    # Pod Security Standard (Replaces PSP)
    defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted"

    globalConfig:
      systemDefaultRegistry: docker.io
      cni: canal
      docker: false
      disable_scheduler: false
      disable_cloud_controller: false
      disable_kube_proxy: false
      etcd_expose_metrics: false
      profile: 'cis'
      selinux: false
      secrets_encryption: true
      write_kubeconfig_mode: 0600
      use_service_account_credentials: false
      protect_kernel_defaults: true
      cloud_provider_name: harvester
      cloud_provider_config: secret://fleet-default:harvesterconfigzswmd

      kube_apiserver_arg:
        - "service-account-extend-token-expiration=false"
        - "anonymous-auth=false"
        - "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs"
        - "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml"
        - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml"
        - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log"
        - "audit-log-maxage=30"
        - "audit-log-maxbackup=10"
        - "audit-log-maxsize=100"

      kubelet_arg:
        # Strong Ciphers (CIS 4.2.12)
        - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
        # PID Limit (CIS 4.2.13)
        - "pod-max-pids=4096"
        # Seccomp Default (CIS 4.2.14)
        - "seccomp-default=true"
        - "protect-kernel-defaults=true"
        - "make-iptables-util-chains=true"

    upgradeStrategy:
      controlPlaneConcurrency: 10%
      controlPlaneDrainOptions:
        enabled: false
      workerConcurrency: 10%
      workerDrainOptions:
        enabled: false
  addons:
    monitoring:
      enabled: false
    logging:
      enabled: false
    longhorn:
      enabled: false
    neuvector:
      enabled: false

# node and nodepool(s) values
# ----------------------------------------------------------------
# MANUAL TESTING SECTION
# The Operator will DELETE and OVERWRITE this section at runtime.
# These values are only used if you run 'helm install' manually.
# ----------------------------------------------------------------
nodepools:
  - name: control-plane-nodes
    displayName: cp-nodes
    quantity: 1
    etcd: true
    controlplane: true
    worker: false
    paused: false
    cpuCount: 4
    diskSize: 40
    imageName: vanderlande/image-qhtpc
    memorySize: 8
    networkName: vanderlande/vm-lan
    sshUser: rancher
    vmNamespace: vanderlande
    userData: *userData

  - name: worker-nodes
    displayName: wk-nodes
    quantity: 2
    etcd: false
    controlplane: false
    worker: true
    paused: false
    cpuCount: 2
    diskSize: 40
    imageName: vanderlande/image-qmx5q
    memorySize: 8
    networkName: vanderlande/vm-lan
    sshUser: rancher
    vmNamespace: vanderlande
    userData: *userData
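The MANUAL TESTING SECTION note above says the operator replaces nodepools at runtime; a minimal sketch of that merge (variable names assumed, using the embedded template helpers):

base := map[string]interface{}{}
if err := yaml.Unmarshal(template.GetBaseValues(), &base); err != nil {
	return nil, err
}
pools, err := strategy.GenerateNodePools(ctx, cbp) // provider-specific pools
if err != nil {
	return nil, err
}
base["nodepools"] = pools // overwrites the manual-testing section above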
77
deploy/rig-operator/internal/templates/vsphere/embed.go
Normal file
@@ -0,0 +1,77 @@
package vsphere

import (
	_ "embed"
	"fmt"

	"gopkg.in/yaml.v3"
)

//go:embed values.yaml
var valuesYAML []byte

type Defaults struct {
	CP_CPU  int
	CP_Mem  int
	CP_Disk int

	ChartRepo    string
	ChartName    string
	ChartVersion string

	UserData string
}

// GetDefaults parses the embedded values.yaml to extract global settings
func GetDefaults() (Defaults, error) {
	var raw map[string]interface{}
	if err := yaml.Unmarshal(valuesYAML, &raw); err != nil {
		return Defaults{}, fmt.Errorf("failed to parse vsphere base values: %w", err)
	}

	// 1. Set hardcoded fallbacks (safety net)
	d := Defaults{
		CP_CPU: 2, CP_Mem: 4, CP_Disk: 40, // vSphere might need different defaults than Harvester
	}

	// 2. Read from the _defaults block
	if defs, ok := raw["_defaults"].(map[string]interface{}); ok {

		// Profile defaults
		if cp, ok := defs["controlPlaneProfile"].(map[string]interface{}); ok {
			if v, ok := cp["cpuCores"].(int); ok {
				d.CP_CPU = v
			}
			if v, ok := cp["memoryGb"].(int); ok {
				d.CP_Mem = v
			}
			if v, ok := cp["diskGb"].(int); ok {
				d.CP_Disk = v
			}
		}

		// Helm chart defaults
		if chart, ok := defs["helmChart"].(map[string]interface{}); ok {
			if v, ok := chart["repo"].(string); ok {
				d.ChartRepo = v
			}
			if v, ok := chart["name"].(string); ok {
				d.ChartName = v
			}
			if v, ok := chart["version"].(string); ok {
				d.ChartVersion = v
			}
		}

		// UserData default
		if v, ok := defs["userData"].(string); ok {
			d.UserData = v
		}
	}
	return d, nil
}

// GetBaseValues returns the raw bytes for the MasterBuilder
func GetBaseValues() []byte {
	return valuesYAML
}
202
deploy/rig-operator/internal/templates/vsphere/values.yaml
Normal file
@@ -0,0 +1,202 @@
|
||||
# ----------------------------------------------------------------
|
||||
# BASE TEMPLATE (internal/templates/base_values.yaml)
|
||||
# ----------------------------------------------------------------
|
||||
|
||||
_defaults:
|
||||
helmChart:
|
||||
repo: ""
|
||||
name: "oci://ghcr.io/rancherfederal/charts/rancher-cluster-templates"
|
||||
version: "0.7.2"
|
||||
controlPlaneProfile:
|
||||
cpuCores: 4
|
||||
memoryGb: 8
|
||||
diskGb: 40
|
||||
userData: &userData |
|
||||
#cloud-config
|
||||
package_update: false
|
||||
package_upgrade: false
|
||||
snap:
|
||||
commands:
|
||||
00: snap refresh --hold=forever
|
||||
package_reboot_if_required: true
|
||||
packages:
|
||||
- yq
|
||||
- jq
|
||||
|
||||
disable_root: true
|
||||
ssh_pwauth: false
|
||||
|
||||
write_files:
|
||||
- path: /root/updates.sh
|
||||
permissions: '0550'
|
||||
content: |
|
||||
#!/bin/bash
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-mark hold linux-headers-generic
|
||||
apt-mark hold linux-headers-virtual
|
||||
apt-mark hold linux-image-virtual
|
||||
apt-mark hold linux-virtual
|
||||
apt-get update
|
||||
apt-get upgrade -y
|
||||
apt-get autoremove -y
|
||||
|
||||
users:
|
||||
- name: rancher
|
||||
gecos: Rancher service account
|
||||
hashed_passwd: $6$Mas.x2i7B2cefjUy$59363FmEuoU.LiTLNRZmtemlH2W0D0SWsig22KSZ3QzOmfxeZXxdSx5wIw9wO7GXF/M9W.9SHoKVBOYj1HPX3.
|
||||
lock_passwd: false
|
||||
shell: /bin/bash
|
||||
groups: [users, sudo, docker]
|
||||
sudo: ALL=(ALL:ALL) ALL
|
||||
ssh_authorized_keys:
|
||||
- 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEwWnnOTAu0LlAZRczQ0Z0KvNlUdPhGQhpZie+nF1O3s'
|
||||
|
||||
disable_root: true
|
||||
ssh_pwauth: true
|
||||
|
||||
runcmd:
|
||||
# - systemctl enable --now qemu-guest-agent
|
||||
- sysctl --system
|
||||
- /root/updates.sh
|
||||
# Immediate run of fix script
|
||||
|
||||
bootcmd:
|
||||
- sudo bash /root/networking.sh
|
||||
|
||||
final_message: |
|
||||
VI_CNV_CLOUD_INIT has been applied successfully.
|
||||
Node ready for Rancher!
|
||||
|
||||
# amazonec2, azure, digitalocean, harvester, vsphere, custom
cloudprovider: vsphere

# cloud provider credentials
cloudCredentialSecretName: cc-lhtl9

# rancher manager url
rancher:
  cattle:
    url: rancher.tst.vanderlande.com

# cluster values
cluster:
  name: default-cluster-005
  # labels:
  #   key: value
  config:
    kubernetesVersion: v1.31.12+rke2r1
    enableNetworkPolicy: true
    localClusterAuthEndpoint:
      enabled: false

    # Pod Security Standard (Replaces PSP)
    # defaultPodSecurityAdmissionConfigurationTemplateName: "rancher-restricted"

    globalConfig:
      systemDefaultRegistry: docker.io
      cni: canal
      docker: false
      disable_scheduler: false
      disable_cloud_controller: false
      disable_kube_proxy: false
      etcd_expose_metrics: false
      profile: ''
      selinux: false
      secrets_encryption: false
      write_kubeconfig_mode: 0600
      use_service_account_credentials: false
      protect_kernel_defaults: false
      cloud_provider_name: ''

      # kube_apiserver_arg:
      #   - "service-account-extend-token-expiration=false"
      #   - "anonymous-auth=false"
      #   - "enable-admission-plugins=NodeRestriction,PodSecurity,EventRateLimit,DenyServiceExternalIPs"
      #   - "admission-control-config-file=/etc/rancher/rke2/rke2-admission.yaml"
      #   - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml"
      #   - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log"
      #   - "audit-log-maxage=30"
      #   - "audit-log-maxbackup=10"
      #   - "audit-log-maxsize=100"

      # kubelet_arg:
      #   # Strong Ciphers (CIS 4.2.12)
      #   - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
      #   # PID Limit (CIS 4.2.13)
      #   - "pod-max-pids=4096"
      #   # Seccomp Default (CIS 4.2.14)
      #   - "seccomp-default=true"
      #   - "protect-kernel-defaults=true"
      #   - "make-iptables-util-chains=true"

    upgradeStrategy:
      controlPlaneConcurrency: 10%
      controlPlaneDrainOptions:
        enabled: false
      workerConcurrency: 10%
      workerDrainOptions:
        enabled: false

  addons:
    monitoring:
      enabled: false
    logging:
      enabled: false
    longhorn:
      enabled: true
    neuvector:
      enabled: false

# node and nodepool(s) values
# ----------------------------------------------------------------
# MANUAL TESTING SECTION
# The Operator will DELETE and OVERWRITE this section at runtime.
# These values are only used if you run 'helm install' manually.
# (A hedged sketch of such an overwrite follows this file.)
# ----------------------------------------------------------------
nodepools:
  - name: control-plane-nodes
    displayName: cp-nodes
    quantity: 1
    etcd: true
    controlplane: true
    worker: false
    paused: false
    # VSPHERE SPECIFIC FIELDS
    cpuCount: 2
    memorySize: 8192
    diskSize: 40000
    vcenter: "vcenter.vanderlande.com"
    datacenter: "NL001"
    folder: "ICT Digitalisation - Rancher"
    pool: "NL001 Development - Rancher/Resources"
    datastoreCluster: "NL001 Development - Rancher SDRS" # Matches your SDRS input
    network:
      - "nl001.vDS.Distri.Vlan.1542"
    # Provisioning Source
    creationType: "template"
    cloneFrom: "nl001-cp-ubuntu-22.04-amd64-20250327-5.15.0-135-rke2-k3s"
    cloudConfig: *userData # Using the anchor from your base file

  - name: worker-storage-nodes
    displayName: wk-nodes
    quantity: 2
    etcd: false
    controlplane: false
    worker: true
    paused: false
    # VSPHERE SPECIFIC FIELDS
    cpuCount: 4
    memorySize: 8192
    diskSize: 100000
    vcenter: "vcenter.vanderlande.com"
    datacenter: "NL001"
    folder: "ICT Digitalisation - Rancher"
    pool: "NL001 Development - Rancher/Resources"
    datastoreCluster: "NL001 Development - Rancher SDRS" # Matches your SDRS input
    network:
      - "nl001.vDS.Distri.Vlan.1542"
    # Provisioning Source
    creationType: "template"
    cloneFrom: "nl001-cp-ubuntu-22.04-amd64-20250327-5.15.0-135-rke2-k3s"
    cloudConfig: *userData # Using the anchor from your base file
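The banner above says the Operator deletes and overwrites the nodepools section at runtime, but the commit does not show that code here. Purely as a hedged illustration, a minimal Go sketch of such an overwrite could look like the following, assuming a hypothetical Nodepool struct and sigs.k8s.io/yaml for marshaling (none of these names are confirmed by this commit):

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// Nodepool is a hypothetical struct mirroring one entry of the
// nodepools list in the values file above (field names assumed).
type Nodepool struct {
	Name         string `json:"name"`
	DisplayName  string `json:"displayName"`
	Quantity     int    `json:"quantity"`
	Etcd         bool   `json:"etcd"`
	ControlPlane bool   `json:"controlplane"`
	Worker       bool   `json:"worker"`
}

func main() {
	// The operator would derive this list from its own CR spec and
	// replace whatever the manual-testing section contained.
	override := map[string]interface{}{
		"nodepools": []Nodepool{
			{Name: "control-plane-nodes", DisplayName: "cp-nodes", Quantity: 1, Etcd: true, ControlPlane: true},
			{Name: "worker-storage-nodes", DisplayName: "wk-nodes", Quantity: 2, Worker: true},
		},
	}
	out, err := yaml.Marshal(override)
	if err != nil {
		panic(err)
	}
	// The rendered YAML would then be spliced into the chart values
	// before the Helm release is applied (mechanism assumed, not shown here).
	fmt.Println(string(out))
}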
11
deploy/rig-operator/misc/patch-default-sa
Executable file
11
deploy/rig-operator/misc/patch-default-sa
Executable file
@@ -0,0 +1,11 @@
#!/bin/sh

# Copy this script into /etc/cron.daily/
# Make it executable: chmod 0755

# TODO: Update path to actual kubeconfig
export KUBECONFIG=/home/rancher/.kube/config

# Disable token automounting on the default ServiceAccount of every namespace.
for n in $(kubectl get namespaces -A -o=jsonpath="{.items[*]['metadata.name']}"); do
    kubectl patch serviceaccount default -p '{"automountServiceAccountToken": false}' -n "$n"
done
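The cron script above shells out to kubectl for each namespace. As a point of comparison only (not part of this commit), a minimal client-go sketch of the same strategic-merge patch might look like this; the kubeconfig path is the one the script assumes:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Same kubeconfig path the cron script exports (assumption carried over).
	config, err := clientcmd.BuildConfigFromFlags("", "/home/rancher/.kube/config")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Equivalent to the kubectl -p payload above.
	patch := []byte(`{"automountServiceAccountToken": false}`)

	namespaces, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, ns := range namespaces.Items {
		// Patch the default ServiceAccount in each namespace.
		if _, err := clientset.CoreV1().ServiceAccounts(ns.Name).Patch(
			context.TODO(), "default", types.StrategicMergePatchType,
			patch, metav1.PatchOptions{}); err != nil {
			fmt.Printf("failed to patch default ServiceAccount in %s: %v\n", ns.Name, err)
		}
	}
}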
92
deploy/rig-operator/test/e2e/e2e_suite_test.go
Normal file
92
deploy/rig-operator/test/e2e/e2e_suite_test.go
Normal file
@@ -0,0 +1,92 @@
//go:build e2e
// +build e2e

/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"os"
	"os/exec"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"vanderlande.com/ittp/appstack/rig-operator/test/utils"
)

var (
	// Optional environment variables:
	// - CERT_MANAGER_INSTALL_SKIP=true: skips CertManager installation during test setup.
	// This variable is useful if CertManager is already installed, avoiding
	// re-installation and conflicts.
	skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true"
	// isCertManagerAlreadyInstalled will be set to true when CertManager CRDs are found on the cluster
	isCertManagerAlreadyInstalled = false

	// projectImage is the name of the image which will be built and loaded
	// with the code source changes to be tested.
	projectImage = "example.com/deploy:v0.0.1"
)

// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated,
// temporary environment to validate project changes with the purpose of being used in CI jobs.
// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs
// CertManager.
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)
	_, _ = fmt.Fprintf(GinkgoWriter, "Starting deploy integration test suite\n")
	RunSpecs(t, "e2e suite")
}

var _ = BeforeSuite(func() {
	By("building the manager(Operator) image")
	cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
	_, err := utils.Run(cmd)
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image")

	// TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is
	// built and available before running the tests. Also, remove the following block.
	By("loading the manager(Operator) image on Kind")
	err = utils.LoadImageToKindClusterWithName(projectImage)
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind")

	// The e2e tests are intended to run on a temporary cluster that is created and destroyed for testing.
	// To prevent errors when tests run in environments with CertManager already installed,
	// we check for its presence before execution.
	// Setup CertManager before the suite if not skipped and if not already installed.
	if !skipCertManagerInstall {
		By("checking if cert manager is installed already")
		isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled()
		if !isCertManagerAlreadyInstalled {
			_, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n")
			Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager")
		} else {
			_, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n")
		}
	}
})

var _ = AfterSuite(func() {
	// Teardown CertManager after the suite if not skipped and if it was not already installed
	if !skipCertManagerInstall && !isCertManagerAlreadyInstalled {
		_, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n")
		utils.UninstallCertManager()
	}
})
337
deploy/rig-operator/test/e2e/e2e_test.go
Normal file
337
deploy/rig-operator/test/e2e/e2e_test.go
Normal file
@@ -0,0 +1,337 @@
//go:build e2e
// +build e2e

/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"vanderlande.com/ittp/appstack/rig-operator/test/utils"
)

// namespace in which the project is deployed
const namespace = "deploy-system"

// serviceAccountName created for the project
const serviceAccountName = "deploy-controller-manager"

// metricsServiceName is the name of the metrics service of the project
const metricsServiceName = "deploy-controller-manager-metrics-service"

// metricsRoleBindingName is the name of the RBAC binding that will be created to allow access to the metrics data
const metricsRoleBindingName = "deploy-metrics-binding"

var _ = Describe("Manager", Ordered, func() {
	var controllerPodName string

	// Before running the tests, set up the environment by creating the namespace,
	// enforcing the restricted security policy on the namespace, installing CRDs,
	// and deploying the controller.
	BeforeAll(func() {
		By("creating manager namespace")
		cmd := exec.Command("kubectl", "create", "ns", namespace)
		_, err := utils.Run(cmd)
		Expect(err).NotTo(HaveOccurred(), "Failed to create namespace")

		By("labeling the namespace to enforce the restricted security policy")
		cmd = exec.Command("kubectl", "label", "--overwrite", "ns", namespace,
			"pod-security.kubernetes.io/enforce=restricted")
		_, err = utils.Run(cmd)
		Expect(err).NotTo(HaveOccurred(), "Failed to label namespace with restricted policy")

		By("installing CRDs")
		cmd = exec.Command("make", "install")
		_, err = utils.Run(cmd)
		Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs")

		By("deploying the controller-manager")
		cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage))
		_, err = utils.Run(cmd)
		Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager")
	})

	// After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs,
	// and deleting the namespace.
	AfterAll(func() {
		By("cleaning up the curl pod for metrics")
		cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace)
		_, _ = utils.Run(cmd)

		By("undeploying the controller-manager")
		cmd = exec.Command("make", "undeploy")
		_, _ = utils.Run(cmd)

		By("uninstalling CRDs")
		cmd = exec.Command("make", "uninstall")
		_, _ = utils.Run(cmd)

		By("removing manager namespace")
		cmd = exec.Command("kubectl", "delete", "ns", namespace)
		_, _ = utils.Run(cmd)
	})

	// After each test, check for failures and collect logs, events,
	// and pod descriptions for debugging.
	AfterEach(func() {
		specReport := CurrentSpecReport()
		if specReport.Failed() {
			By("Fetching controller manager pod logs")
			cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
			controllerLogs, err := utils.Run(cmd)
			if err == nil {
				_, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs)
			} else {
				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err)
			}

			By("Fetching Kubernetes events")
			cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp")
			eventsOutput, err := utils.Run(cmd)
			if err == nil {
				_, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput)
			} else {
				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err)
			}

			By("Fetching curl-metrics logs")
			cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
			metricsOutput, err := utils.Run(cmd)
			if err == nil {
				_, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput)
			} else {
				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err)
			}

			By("Fetching controller manager pod description")
			cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace)
			podDescription, err := utils.Run(cmd)
			if err == nil {
				fmt.Println("Pod description:\n", podDescription)
			} else {
				fmt.Println("Failed to describe controller pod")
			}
		}
	})

	SetDefaultEventuallyTimeout(2 * time.Minute)
	SetDefaultEventuallyPollingInterval(time.Second)

	Context("Manager", func() {
		It("should run successfully", func() {
			By("validating that the controller-manager pod is running as expected")
			verifyControllerUp := func(g Gomega) {
				// Get the name of the controller-manager pod
				cmd := exec.Command("kubectl", "get",
					"pods", "-l", "control-plane=controller-manager",
					"-o", "go-template={{ range .items }}"+
						"{{ if not .metadata.deletionTimestamp }}"+
						"{{ .metadata.name }}"+
						"{{ \"\\n\" }}{{ end }}{{ end }}",
					"-n", namespace,
				)

				podOutput, err := utils.Run(cmd)
				g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information")
				podNames := utils.GetNonEmptyLines(podOutput)
				g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running")
				controllerPodName = podNames[0]
				g.Expect(controllerPodName).To(ContainSubstring("controller-manager"))

				// Validate the pod's status
				cmd = exec.Command("kubectl", "get",
					"pods", controllerPodName, "-o", "jsonpath={.status.phase}",
					"-n", namespace,
				)
				output, err := utils.Run(cmd)
				g.Expect(err).NotTo(HaveOccurred())
				g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status")
			}
			Eventually(verifyControllerUp).Should(Succeed())
		})

		It("should ensure the metrics endpoint is serving metrics", func() {
			By("creating a ClusterRoleBinding for the service account to allow access to metrics")
			cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName,
				"--clusterrole=deploy-metrics-reader",
				fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName),
			)
			_, err := utils.Run(cmd)
			Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding")

			By("validating that the metrics service is available")
			cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace)
			_, err = utils.Run(cmd)
			Expect(err).NotTo(HaveOccurred(), "Metrics service should exist")

			By("getting the service account token")
			token, err := serviceAccountToken()
			Expect(err).NotTo(HaveOccurred())
			Expect(token).NotTo(BeEmpty())

			By("ensuring the controller pod is ready")
			verifyControllerPodReady := func(g Gomega) {
				cmd := exec.Command("kubectl", "get", "pod", controllerPodName, "-n", namespace,
					"-o", "jsonpath={.status.conditions[?(@.type=='Ready')].status}")
				output, err := utils.Run(cmd)
				g.Expect(err).NotTo(HaveOccurred())
				g.Expect(output).To(Equal("True"), "Controller pod not ready")
			}
			Eventually(verifyControllerPodReady, 3*time.Minute, time.Second).Should(Succeed())

			By("verifying that the controller manager is serving the metrics server")
			verifyMetricsServerStarted := func(g Gomega) {
				cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
				output, err := utils.Run(cmd)
				g.Expect(err).NotTo(HaveOccurred())
				g.Expect(output).To(ContainSubstring("Serving metrics server"),
					"Metrics server not yet started")
			}
			Eventually(verifyMetricsServerStarted, 3*time.Minute, time.Second).Should(Succeed())

			// +kubebuilder:scaffold:e2e-metrics-webhooks-readiness

			By("creating the curl-metrics pod to access the metrics endpoint")
			cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never",
				"--namespace", namespace,
				"--image=curlimages/curl:latest",
				"--overrides",
				fmt.Sprintf(`{
					"spec": {
						"containers": [{
							"name": "curl",
							"image": "curlimages/curl:latest",
							"command": ["/bin/sh", "-c"],
							"args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"],
							"securityContext": {
								"readOnlyRootFilesystem": true,
								"allowPrivilegeEscalation": false,
								"capabilities": {
									"drop": ["ALL"]
								},
								"runAsNonRoot": true,
								"runAsUser": 1000,
								"seccompProfile": {
									"type": "RuntimeDefault"
								}
							}
						}],
						"serviceAccountName": "%s"
					}
				}`, token, metricsServiceName, namespace, serviceAccountName))
			_, err = utils.Run(cmd)
			Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod")

			By("waiting for the curl-metrics pod to complete")
			verifyCurlUp := func(g Gomega) {
				cmd := exec.Command("kubectl", "get", "pods", "curl-metrics",
					"-o", "jsonpath={.status.phase}",
					"-n", namespace)
				output, err := utils.Run(cmd)
				g.Expect(err).NotTo(HaveOccurred())
				g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status")
			}
			Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed())

			By("getting the metrics by checking curl-metrics logs")
			verifyMetricsAvailable := func(g Gomega) {
				metricsOutput, err := getMetricsOutput()
				g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod")
				g.Expect(metricsOutput).NotTo(BeEmpty())
				g.Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK"))
			}
			Eventually(verifyMetricsAvailable, 2*time.Minute).Should(Succeed())
		})

		// +kubebuilder:scaffold:e2e-webhooks-checks

		// TODO: Customize the e2e test suite with scenarios specific to your project.
		// Consider applying sample CR(s), checking their status, and/or verifying
		// the reconciliation by using the metrics, i.e.:
		// metricsOutput, err := getMetricsOutput()
		// Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod")
		// Expect(metricsOutput).To(ContainSubstring(
		//	fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`,
		//	strings.ToLower(<Kind>),
		// ))
	})
})

// serviceAccountToken returns a token for the specified service account in the given namespace.
// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request
// and parsing the resulting token from the API response.
func serviceAccountToken() (string, error) {
	const tokenRequestRawString = `{
		"apiVersion": "authentication.k8s.io/v1",
		"kind": "TokenRequest"
	}`

	// Temporary file to store the token request
	secretName := fmt.Sprintf("%s-token-request", serviceAccountName)
	tokenRequestFile := filepath.Join("/tmp", secretName)
	err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644))
	if err != nil {
		return "", err
	}

	var out string
	verifyTokenCreation := func(g Gomega) {
		// Execute kubectl command to create the token
		cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf(
			"/api/v1/namespaces/%s/serviceaccounts/%s/token",
			namespace,
			serviceAccountName,
		), "-f", tokenRequestFile)

		output, err := cmd.CombinedOutput()
		g.Expect(err).NotTo(HaveOccurred())

		// Parse the JSON output to extract the token
		var token tokenRequest
		err = json.Unmarshal(output, &token)
		g.Expect(err).NotTo(HaveOccurred())

		out = token.Status.Token
	}
	Eventually(verifyTokenCreation).Should(Succeed())

	return out, err
}

// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint.
func getMetricsOutput() (string, error) {
	By("getting the curl-metrics logs")
	cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
	return utils.Run(cmd)
}

// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response,
// containing only the token field that we need to extract.
type tokenRequest struct {
	Status struct {
		Token string `json:"token"`
	} `json:"status"`
}
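serviceAccountToken above reaches the TokenRequest subresource by shelling out to kubectl create --raw. As a hedged aside (not part of this commit), client-go exposes the same subresource directly through CreateToken; a minimal sketch, reusing the namespace and service account names from the test and assuming in-cluster or kubeconfig-based configuration:

package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Resolve kubeconfig via the default loading rules (assumption).
	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		clientcmd.NewDefaultClientConfigLoadingRules(), nil).ClientConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// CreateToken hits the same TokenRequest subresource that the e2e
	// helper reaches via `kubectl create --raw`.
	tr, err := clientset.CoreV1().ServiceAccounts("deploy-system").CreateToken(
		context.TODO(), "deploy-controller-manager",
		&authenticationv1.TokenRequest{}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(tr.Status.Token)
}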
226
deploy/rig-operator/test/utils/utils.go
Normal file
226
deploy/rig-operator/test/utils/utils.go
Normal file
@@ -0,0 +1,226 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"bufio"
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"strings"

	. "github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck
)

const (
	certmanagerVersion = "v1.19.1"
	certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml"

	defaultKindBinary  = "kind"
	defaultKindCluster = "kind"
)

func warnError(err error) {
	_, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err)
}

// Run executes the provided command within this context
func Run(cmd *exec.Cmd) (string, error) {
	dir, _ := GetProjectDir()
	cmd.Dir = dir

	if err := os.Chdir(cmd.Dir); err != nil {
		_, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err)
	}

	cmd.Env = append(os.Environ(), "GO111MODULE=on")
	command := strings.Join(cmd.Args, " ")
	_, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err)
	}

	return string(output), nil
}

// UninstallCertManager uninstalls the cert manager
func UninstallCertManager() {
	url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
	cmd := exec.Command("kubectl", "delete", "-f", url)
	if _, err := Run(cmd); err != nil {
		warnError(err)
	}

	// Delete leftover leases in kube-system (not cleaned by default)
	kubeSystemLeases := []string{
		"cert-manager-cainjector-leader-election",
		"cert-manager-controller",
	}
	for _, lease := range kubeSystemLeases {
		cmd = exec.Command("kubectl", "delete", "lease", lease,
			"-n", "kube-system", "--ignore-not-found", "--force", "--grace-period=0")
		if _, err := Run(cmd); err != nil {
			warnError(err)
		}
	}
}

// InstallCertManager installs the cert manager bundle.
func InstallCertManager() error {
	url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
	cmd := exec.Command("kubectl", "apply", "-f", url)
	if _, err := Run(cmd); err != nil {
		return err
	}
	// Wait for cert-manager-webhook to be ready, which can take time if cert-manager
	// was re-installed after uninstalling on a cluster.
	cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook",
		"--for", "condition=Available",
		"--namespace", "cert-manager",
		"--timeout", "5m",
	)

	_, err := Run(cmd)
	return err
}

// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed
// by verifying the existence of key CRDs related to Cert Manager.
func IsCertManagerCRDsInstalled() bool {
	// List of common Cert Manager CRDs
	certManagerCRDs := []string{
		"certificates.cert-manager.io",
		"issuers.cert-manager.io",
		"clusterissuers.cert-manager.io",
		"certificaterequests.cert-manager.io",
		"orders.acme.cert-manager.io",
		"challenges.acme.cert-manager.io",
	}

	// Execute the kubectl command to get all CRDs
	cmd := exec.Command("kubectl", "get", "crds")
	output, err := Run(cmd)
	if err != nil {
		return false
	}

	// Check if any of the Cert Manager CRDs are present
	crdList := GetNonEmptyLines(output)
	for _, crd := range certManagerCRDs {
		for _, line := range crdList {
			if strings.Contains(line, crd) {
				return true
			}
		}
	}

	return false
}

// LoadImageToKindClusterWithName loads a local docker image into the kind cluster
func LoadImageToKindClusterWithName(name string) error {
	cluster := defaultKindCluster
	if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
		cluster = v
	}
	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
	kindBinary := defaultKindBinary
	if v, ok := os.LookupEnv("KIND"); ok {
		kindBinary = v
	}
	cmd := exec.Command(kindBinary, kindOptions...)
	_, err := Run(cmd)
	return err
}

// GetNonEmptyLines splits the given command output into individual lines
// and drops the empty ones.
func GetNonEmptyLines(output string) []string {
	var res []string
	elements := strings.Split(output, "\n")
	for _, element := range elements {
		if element != "" {
			res = append(res, element)
		}
	}

	return res
}

// GetProjectDir will return the directory where the project is
func GetProjectDir() (string, error) {
	wd, err := os.Getwd()
	if err != nil {
		return wd, fmt.Errorf("failed to get current working directory: %w", err)
	}
	wd = strings.ReplaceAll(wd, "/test/e2e", "")
	return wd, nil
}

// UncommentCode searches for target in the file and removes the comment prefix
// from the target content. The target content may span multiple lines.
func UncommentCode(filename, target, prefix string) error {
	// false positive
	// nolint:gosec
	content, err := os.ReadFile(filename)
	if err != nil {
		return fmt.Errorf("failed to read file %q: %w", filename, err)
	}
	strContent := string(content)

	idx := strings.Index(strContent, target)
	if idx < 0 {
		return fmt.Errorf("unable to find the code %q to be uncommented", target)
	}

	out := new(bytes.Buffer)
	_, err = out.Write(content[:idx])
	if err != nil {
		return fmt.Errorf("failed to write to output: %w", err)
	}

	scanner := bufio.NewScanner(bytes.NewBufferString(target))
	if !scanner.Scan() {
		return nil
	}
	for {
		if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil {
			return fmt.Errorf("failed to write to output: %w", err)
		}
		// Avoid writing a newline in case the previous line was the last in target.
		if !scanner.Scan() {
			break
		}
		if _, err = out.WriteString("\n"); err != nil {
			return fmt.Errorf("failed to write to output: %w", err)
		}
	}

	if _, err = out.Write(content[idx+len(target):]); err != nil {
		return fmt.Errorf("failed to write to output: %w", err)
	}

	// false positive
	// nolint:gosec
	if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil {
		return fmt.Errorf("failed to write file %q: %w", filename, err)
	}

	return nil
}
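UncommentCode is the kind of helper scaffolded e2e suites use to toggle commented-out kustomize entries before a run. A hypothetical usage sketch follows; the file path and target text are illustrative, not taken from this commit:

package main

import (
	"log"

	"vanderlande.com/ittp/appstack/rig-operator/test/utils"
)

func main() {
	// Hypothetical: strip the leading "#" from a commented-out kustomize
	// entry so an e2e run enables cert-manager patches. Both the path and
	// the target string below are assumptions for illustration only.
	err := utils.UncommentCode(
		"config/default/kustomization.yaml", // illustrative path
		"#- ../certmanager",                 // illustrative target text
		"#",                                 // comment prefix to strip
	)
	if err != nil {
		log.Fatal(err)
	}
}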