Drop initial code

Danny Bessems
2026-01-15 09:58:01 +00:00
parent 227d957219
commit 1e7c9ba5cb
228 changed files with 19883 additions and 1 deletion

@@ -0,0 +1,291 @@
package controller
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
"vanderlande.com/ittp/appstack/rig-operator/internal/builder"
"vanderlande.com/ittp/appstack/rig-operator/internal/helm"
"vanderlande.com/ittp/appstack/rig-operator/internal/provider"
"vanderlande.com/ittp/appstack/rig-operator/internal/provider/harvester"
harvesterTemplate "vanderlande.com/ittp/appstack/rig-operator/internal/templates/harvester"
"vanderlande.com/ittp/appstack/rig-operator/internal/provider/vsphere"
vsphereTemplate "vanderlande.com/ittp/appstack/rig-operator/internal/templates/vsphere"
)
const (
rigFinalizer = "rig.appstack.io/finalizer"
)
// ClusterBlueprintReconciler reconciles a ClusterBlueprint object
type ClusterBlueprintReconciler struct {
client.Client
Scheme *runtime.Scheme
Recorder record.EventRecorder
}
// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints/finalizers,verbs=update
// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints,verbs=get;list;watch
// +kubebuilder:rbac:groups=rig.appstack.io,resources=harvesterblueprints,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
func (r *ClusterBlueprintReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
// 1. Fetch ClusterBlueprint (CBP)
cbp := &rigv1.ClusterBlueprint{}
if err := r.Get(ctx, req.NamespacedName, cbp); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// 2. Handle deletion
if !cbp.ObjectMeta.DeletionTimestamp.IsZero() {
return r.handleDelete(ctx, cbp)
}
// 3. Ensure the finalizer is present
if !controllerutil.ContainsFinalizer(cbp, rigFinalizer) {
controllerutil.AddFinalizer(cbp, rigFinalizer)
if err := r.Update(ctx, cbp); err != nil {
return ctrl.Result{}, err
}
}
// 4. Fetch InfraBlueprint (IBP)
ibp := &rigv1.InfraBlueprint{}
if err := r.Get(ctx, types.NamespacedName{Name: cbp.Spec.InfraBlueprintRef, Namespace: cbp.Namespace}, ibp); err != nil {
l.Error(err, "InfraBlueprint not found", "Infra", cbp.Spec.InfraBlueprintRef)
r.updateStatus(ctx, cbp, "PendingInfra", false)
return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
}
// =====================================================================
// 4.5. QUOTA CHECK (The Gatekeeper)
// Only check quota if we are NOT already deployed.
// (Existing clusters keep running even if quota shrinks later)
// =====================================================================
if cbp.Status.Phase != "Deployed" {
if err := r.checkQuota(cbp, ibp); err != nil {
l.Error(err, "Quota Exceeded")
// We stop here! Helm Apply will NOT run.
r.updateStatus(ctx, cbp, "QuotaExceeded", false)
// Requeue slowly to check whether resources have freed up later
return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
}
}
// 5. Select Strategy based on Infra ProviderRef
var selectedStrategy provider.Strategy
var baseTemplate []byte
var credentialSecret string
switch ibp.Spec.ProviderRef.Kind {
case "HarvesterBlueprint":
// A. Fetch the specific Harvester Config (HBP)
hbp := &rigv1.HarvesterBlueprint{}
hbpName := types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace}
if err := r.Get(ctx, hbpName, hbp); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to load HarvesterBlueprint: %w", err)
}
// B. Ensure Identity (Mint ServiceAccount/Secret)
idMgr := harvester.NewIdentityManager(r.Client, r.Scheme)
secretName, err := idMgr.Ensure(ctx, cbp, ibp, hbp)
if err != nil {
l.Error(err, "Failed to ensure identity")
r.updateStatus(ctx, cbp, "ProvisioningFailed", false)
return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
}
credentialSecret = secretName
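// The minted secret name is passed to the MasterBuilder below so the generated
// Helm values reference this per-cluster credential rather than a shared one.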
// C. Load Defaults & Init Strategy
defaults, err := harvesterTemplate.GetDefaults()
if err != nil {
return ctrl.Result{}, err
}
baseTemplate = harvesterTemplate.GetBaseValues()
// Pass ibp.Spec.RancherURL to the strategy factory
selectedStrategy = harvester.NewStrategy(
hbp,
ibp.Spec.UserData,
ibp.Spec.RancherURL,
defaults,
)
case "VsphereBlueprint":
// A. Fetch the specific vSphere Config (VBP)
vbp := &rigv1.VsphereBlueprint{}
vbpName := types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace}
if err := r.Get(ctx, vbpName, vbp); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to load VsphereBlueprint: %w", err)
}
// B. Load Defaults (CPU/RAM sizing safety nets)
defaults, err := vsphereTemplate.GetDefaults()
if err != nil {
return ctrl.Result{}, err
}
baseTemplate = vsphereTemplate.GetBaseValues()
// C. Init Strategy
// Note: vSphere typically uses the global 'cloudCredentialSecret' defined in InfraBlueprint
// rather than minting dynamic tokens per cluster like Harvester does.
credentialSecret = ibp.Spec.CloudCredentialSecret
selectedStrategy = vsphere.NewStrategy(
vbp,
ibp.Spec.UserData,
ibp.Spec.RancherURL,
defaults,
)
default:
return ctrl.Result{}, fmt.Errorf("unsupported provider kind: %s", ibp.Spec.ProviderRef.Kind)
}
// 6. Build Helm Values (Generic Engine)
masterBuilder := builder.NewMasterBuilder(selectedStrategy, baseTemplate)
values, err := masterBuilder.Build(ctx, cbp, credentialSecret)
if err != nil {
l.Error(err, "Failed to build helm values")
r.updateStatus(ctx, cbp, "ConfigGenerationFailed", false)
return ctrl.Result{}, nil // Config error: don't requeue; a spec change will trigger a new reconcile
}
// 7. Apply Helm Chart
// We use the ChartConfig extracted by the MasterBuilder (from the YAML defaults)
chartCfg := masterBuilder.GetChartConfig()
helmConfig := helm.Config{
Namespace: cbp.Namespace,
ReleaseName: cbp.Name, // We use the Cluster name as the Release name
RepoURL: chartCfg.Repo,
ChartName: chartCfg.Name,
Version: chartCfg.Version,
Values: values,
}
l.Info("Applying Helm Release", "Release", cbp.Name, "Chart", chartCfg.Name)
if err := helm.Apply(helmConfig); err != nil {
l.Error(err, "Helm Install/Upgrade failed")
r.updateStatus(ctx, cbp, "HelmApplyFailed", false)
return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
}
// 8. Success!
r.updateStatus(ctx, cbp, "Deployed", true)
return ctrl.Result{RequeueAfter: 10 * time.Minute}, nil // Re-sync periodically
}
func (r *ClusterBlueprintReconciler) handleDelete(ctx context.Context, cbp *rigv1.ClusterBlueprint) (ctrl.Result, error) {
if controllerutil.ContainsFinalizer(cbp, rigFinalizer) {
// 1. Uninstall Helm Release
helmCfg := helm.Config{
Namespace: cbp.Namespace,
ReleaseName: cbp.Name,
}
// Best effort uninstall
if err := helm.Uninstall(helmCfg); err != nil {
log.FromContext(ctx).Error(err, "Failed to uninstall helm release during cleanup")
}
// 2. Cleanup Identity (Harvester SA)
// We need to look up IBP -> HBP again to know where to clean up.
// This is a simplified lookup: if the IBP or HBP is missing, identity cleanup is skipped.
ibp := &rigv1.InfraBlueprint{}
if err := r.Get(ctx, types.NamespacedName{Name: cbp.Spec.InfraBlueprintRef, Namespace: cbp.Namespace}, ibp); err == nil {
if ibp.Spec.ProviderRef.Kind == "HarvesterBlueprint" {
hbp := &rigv1.HarvesterBlueprint{}
if err := r.Get(ctx, types.NamespacedName{Name: ibp.Spec.ProviderRef.Name, Namespace: cbp.Namespace}, hbp); err == nil {
idMgr := harvester.NewIdentityManager(r.Client, r.Scheme)
idMgr.Cleanup(ctx, cbp, ibp, hbp)
}
}
}
// 3. Remove Finalizer
controllerutil.RemoveFinalizer(cbp, rigFinalizer)
if err := r.Update(ctx, cbp); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
func (r *ClusterBlueprintReconciler) updateStatus(ctx context.Context, cbp *rigv1.ClusterBlueprint, phase string, ready bool) {
cbp.Status.Phase = phase
cbp.Status.Ready = ready
if err := r.Status().Update(ctx, cbp); err != nil {
log.FromContext(ctx).Error(err, "Failed to update status")
}
}
// SetupWithManager sets up the controller with the Manager.
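// Note: only ClusterBlueprint events trigger this reconciler; changes to the referenced
// Infra/Harvester blueprints are picked up on the periodic 10-minute re-sync in Reconcile.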
func (r *ClusterBlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&rigv1.ClusterBlueprint{}).
Complete(r)
}
// checkQuota calculates the resources this cluster requests and compares them against the InfraBlueprint quota.
func (r *ClusterBlueprintReconciler) checkQuota(cbp *rigv1.ClusterBlueprint, ibp *rigv1.InfraBlueprint) error {
// 1. Calculate what this cluster needs
var reqCpu, reqMem, reqDisk int
// Control plane sizing (safe defaults).
// Ideally this matches the defaults in the template/strategy.
cpCount := 1
if cbp.Spec.ControlPlaneHA {
cpCount = 3
}
reqCpu += cpCount * 4
reqMem += cpCount * 8
reqDisk += cpCount * 40
// Worker Pools Sizing
for _, pool := range cbp.Spec.WorkerPools {
reqCpu += pool.Quantity * pool.CpuCores
reqMem += pool.Quantity * pool.MemoryGB
reqDisk += pool.Quantity * pool.DiskGB
}
// 2. Check against Limits
// Note: Status.Usage is calculated asynchronously by the InfraBlueprint controller.
// It covers the other clusters on this Infra and, depending on timing, may or may not
// already include this one yet. For strict admission control we use the conservative check:
//   (Usage + Request) > Max  =>  reject
// If Usage already counts this cluster the check is stricter than necessary, which is
// the safe direction when provisioning something new.
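// Illustrative numbers (not taken from any real spec): an HA control plane
// (3 x 4 CPU / 8 GB / 40 GB) plus one worker pool of 3 x 8 CPU / 16 GB / 100 GB
// requests 36 CPU, 72 GB RAM and 420 GB disk; with MaxCPU=32 and UsedCPU=0 the
// CPU check below rejects the cluster before Helm ever runs.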
q := ibp.Spec.Quota
u := ibp.Status.Usage
if q.MaxCPU > 0 && (u.UsedCPU+reqCpu) > q.MaxCPU {
return fmt.Errorf("requested CPU %d exceeds remaining quota (Max: %d, Used: %d)", reqCpu, q.MaxCPU, u.UsedCPU)
}
if q.MaxMemoryGB > 0 && (u.UsedMemoryGB+reqMem) > q.MaxMemoryGB {
return fmt.Errorf("requested Mem %dGB exceeds remaining quota (Max: %d, Used: %d)", reqMem, q.MaxMemoryGB, u.UsedMemoryGB)
}
if q.MaxDiskGB > 0 && (u.UsedDiskGB+reqDisk) > q.MaxDiskGB {
return fmt.Errorf("requested Disk %dGB exceeds remaining quota (Max: %d, Used: %d)", reqDisk, q.MaxDiskGB, u.UsedDiskGB)
}
return nil
}

@@ -0,0 +1,84 @@
/*
Copyright 2026.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)
var _ = Describe("ClusterBlueprint Controller", func() {
Context("When reconciling a resource", func() {
const resourceName = "test-resource"
ctx := context.Background()
typeNamespacedName := types.NamespacedName{
Name: resourceName,
Namespace: "default", // TODO(user): Modify as needed
}
clusterblueprint := &rigv1alpha1.ClusterBlueprint{}
BeforeEach(func() {
By("creating the custom resource for the Kind ClusterBlueprint")
err := k8sClient.Get(ctx, typeNamespacedName, clusterblueprint)
if err != nil && errors.IsNotFound(err) {
resource := &rigv1alpha1.ClusterBlueprint{
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Namespace: "default",
},
// TODO(user): Specify other spec details if needed.
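// For example (hypothetical type/field values, assuming the usual kubebuilder
// naming for the spec struct):
//   Spec: rigv1alpha1.ClusterBlueprintSpec{InfraBlueprintRef: "test-infra"},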
}
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
}
})
AfterEach(func() {
// TODO(user): Cleanup logic after each test, like removing the resource instance.
resource := &rigv1alpha1.ClusterBlueprint{}
err := k8sClient.Get(ctx, typeNamespacedName, resource)
Expect(err).NotTo(HaveOccurred())
By("Cleanup the specific resource instance ClusterBlueprint")
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
})
It("should successfully reconcile the resource", func() {
By("Reconciling the created resource")
controllerReconciler := &ClusterBlueprintReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}
_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())
// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
// Example: If you expect a certain status condition after reconciliation, verify it here.
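// A minimal sketch of such an assertion (assuming the reconciler sets Status.Phase
// as in the controller; hypothetical, not exercised here):
//   updated := &rigv1alpha1.ClusterBlueprint{}
//   Expect(k8sClient.Get(ctx, typeNamespacedName, updated)).To(Succeed())
//   Expect(updated.Status.Phase).NotTo(BeEmpty())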
})
})
})

@@ -0,0 +1,128 @@
package controller
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)
// InfraBlueprintReconciler reconciles a InfraBlueprint object
type InfraBlueprintReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints,verbs=get;list;watch
func (r *InfraBlueprintReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
// 1. Fetch the InfraBlueprint
infra := &rigv1.InfraBlueprint{}
if err := r.Get(ctx, req.NamespacedName, infra); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// 2. List ALL ClusterBlueprints in the same namespace
// (We assume Infra and Clusters live in the same namespace for security/tenancy)
var clusterList rigv1.ClusterBlueprintList
if err := r.List(ctx, &clusterList, client.InNamespace(req.Namespace)); err != nil {
l.Error(err, "Failed to list clusters for quota calculation")
return ctrl.Result{}, err
}
// 3. Calculate Usage (The Accountant Logic)
var usedCpu, usedMem, usedDisk int
for _, cluster := range clusterList.Items {
// Only count clusters that belong to THIS Infra
if cluster.Spec.InfraBlueprintRef != infra.Name {
continue
}
// Sum Control Plane
if cluster.Spec.ControlPlaneHA {
// Hardcoded fallback that mirrors the defaults used during provisioning.
// Ideally we'd read the templates (or have each cluster report its own
// "ResourcesConsumed" in status), but for accounting purposes safe estimates are fine.
// For now, we use the standard defaults we know:
usedCpu += 3 * 4 // 3 nodes * 4 cores
usedMem += 3 * 8 // 3 nodes * 8 GB
usedDisk += 3 * 40 // 3 nodes * 40 GB
} else {
usedCpu += 1 * 4
usedMem += 1 * 8
usedDisk += 1 * 40
}
// Sum Worker Pools
for _, pool := range cluster.Spec.WorkerPools {
usedCpu += pool.Quantity * pool.CpuCores
usedMem += pool.Quantity * pool.MemoryGB
usedDisk += pool.Quantity * pool.DiskGB
}
}
// 4. Update Status if changed
if infra.Status.Usage.UsedCPU != usedCpu ||
infra.Status.Usage.UsedMemoryGB != usedMem ||
infra.Status.Usage.UsedDiskGB != usedDisk {
infra.Status.Usage.UsedCPU = usedCpu
infra.Status.Usage.UsedMemoryGB = usedMem
infra.Status.Usage.UsedDiskGB = usedDisk
l.Info("Updating Infra Quota Usage", "Infra", infra.Name, "CPU", usedCpu, "Mem", usedMem)
if err := r.Status().Update(ctx, infra); err != nil {
return ctrl.Result{}, err
}
}
// 5. Verify Connectivity (Optional)
// We could check if the ProviderRef exists here and set Ready=true
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *InfraBlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&rigv1.InfraBlueprint{}).
// Also watch ClusterBlueprints: when a Cluster is added, modified or deleted,
// reconcile the Infra it points to so the usage accounting stays current.
Watches(
&rigv1.ClusterBlueprint{},
handler.EnqueueRequestsFromMapFunc(r.findInfraForCluster),
).
Complete(r)
}
// findInfraForCluster maps a Cluster change event to a Reconcile request for its parent Infra
func (r *InfraBlueprintReconciler) findInfraForCluster(ctx context.Context, obj client.Object) []reconcile.Request {
cluster, ok := obj.(*rigv1.ClusterBlueprint)
if !ok {
return nil
}
if cluster.Spec.InfraBlueprintRef != "" {
return []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Name: cluster.Spec.InfraBlueprintRef,
Namespace: cluster.Namespace,
},
},
}
}
return nil
}

@@ -0,0 +1,84 @@
/*
Copyright 2026.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)
var _ = Describe("InfraBlueprint Controller", func() {
Context("When reconciling a resource", func() {
const resourceName = "test-resource"
ctx := context.Background()
typeNamespacedName := types.NamespacedName{
Name: resourceName,
Namespace: "default", // TODO(user): Modify as needed
}
infrablueprint := &rigv1alpha1.InfraBlueprint{}
BeforeEach(func() {
By("creating the custom resource for the Kind InfraBlueprint")
err := k8sClient.Get(ctx, typeNamespacedName, infrablueprint)
if err != nil && errors.IsNotFound(err) {
resource := &rigv1alpha1.InfraBlueprint{
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Namespace: "default",
},
// TODO(user): Specify other spec details if needed.
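// For example (hypothetical type/field names, assuming the usual kubebuilder
// naming for the spec struct; RancherURL is read by the ClusterBlueprint controller):
//   Spec: rigv1alpha1.InfraBlueprintSpec{RancherURL: "https://rancher.example.com"},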
}
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
}
})
AfterEach(func() {
// TODO(user): Cleanup logic after each test, like removing the resource instance.
resource := &rigv1alpha1.InfraBlueprint{}
err := k8sClient.Get(ctx, typeNamespacedName, resource)
Expect(err).NotTo(HaveOccurred())
By("Cleanup the specific resource instance InfraBlueprint")
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
})
It("should successfully reconcile the resource", func() {
By("Reconciling the created resource")
controllerReconciler := &InfraBlueprintReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}
_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())
// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
// Example: If you expect a certain status condition after reconciliation, verify it here.
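// A minimal sketch of such an assertion (hypothetical, not exercised here): with no
// ClusterBlueprints referencing this Infra, usage accounting should remain zero:
//   updated := &rigv1alpha1.InfraBlueprint{}
//   Expect(k8sClient.Get(ctx, typeNamespacedName, updated)).To(Succeed())
//   Expect(updated.Status.Usage.UsedCPU).To(BeZero())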
})
})
})

@@ -0,0 +1,116 @@
/*
Copyright 2026.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"os"
"path/filepath"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
rigv1alpha1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var (
ctx context.Context
cancel context.CancelFunc
testEnv *envtest.Environment
cfg *rest.Config
k8sClient client.Client
)
func TestControllers(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
var err error
err = rigv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
// Retrieve the first found binary directory to allow running tests from IDEs
if getFirstFoundEnvTestBinaryDir() != "" {
testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir()
}
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
cancel()
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path.
// ENVTEST-based tests depend on specific binaries, usually located in paths set by
// controller-runtime. When running tests directly (e.g., via an IDE) without using
// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured.
//
// This function streamlines the process by finding the required binaries, similar to
// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are
// properly set up, run 'make setup-envtest' beforehand.
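// For example (hypothetical layout), with bin/k8s/1.31.0-linux-amd64/ present this
// returns "../../bin/k8s/1.31.0-linux-amd64", i.e. the first directory entry found.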
func getFirstFoundEnvTestBinaryDir() string {
basePath := filepath.Join("..", "..", "bin", "k8s")
entries, err := os.ReadDir(basePath)
if err != nil {
logf.Log.Error(err, "Failed to read directory", "path", basePath)
return ""
}
for _, entry := range entries {
if entry.IsDir() {
return filepath.Join(basePath, entry.Name())
}
}
return ""
}