package controller

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	rigv1 "vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

// Default per-node sizing used to estimate control-plane consumption when
// accounting cluster usage against an Infra quota.
// NOTE(review): these mirror the standard defaults applied elsewhere in the
// operator; ideally the ClusterBlueprint status would report its own
// "ResourcesConsumed" instead — confirm these stay in sync with the
// templating/defaults logic.
const (
	defaultNodeCPUCores = 4  // cores per control-plane node
	defaultNodeMemoryGB = 8  // GB RAM per control-plane node
	defaultNodeDiskGB   = 40 // GB disk per control-plane node

	haControlPlaneNodes     = 3 // node count for an HA control plane
	singleControlPlaneNodes = 1 // node count for a non-HA control plane
)

// InfraBlueprintReconciler reconciles a InfraBlueprint object.
type InfraBlueprintReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rig.appstack.io,resources=infrablueprints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=rig.appstack.io,resources=clusterblueprints,verbs=get;list;watch

// Reconcile recomputes the aggregate CPU/memory/disk consumption of every
// ClusterBlueprint that references this InfraBlueprint and writes the result
// to the Infra's status (only when it actually changed).
func (r *InfraBlueprintReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	l := log.FromContext(ctx)

	// 1. Fetch the InfraBlueprint. NotFound means it was deleted; there is
	// nothing left to account for, so drop the request.
	infra := &rigv1.InfraBlueprint{}
	if err := r.Get(ctx, req.NamespacedName, infra); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// 2. List ALL ClusterBlueprints in the same namespace.
	// (We assume Infra and Clusters live in the same namespace for
	// security/tenancy.)
	var clusterList rigv1.ClusterBlueprintList
	if err := r.List(ctx, &clusterList, client.InNamespace(req.Namespace)); err != nil {
		l.Error(err, "Failed to list clusters for quota calculation")
		return ctrl.Result{}, err
	}

	// 3. Calculate usage (the accountant logic) across all clusters that
	// point at this Infra.
	usedCPU, usedMem, usedDisk := usageForInfra(infra.Name, clusterList.Items)

	// 4. Update status only if it changed — avoids needless API writes and
	// the reconcile churn a self-triggered status update would cause.
	if infra.Status.Usage.UsedCPU != usedCPU ||
		infra.Status.Usage.UsedMemoryGB != usedMem ||
		infra.Status.Usage.UsedDiskGB != usedDisk {

		infra.Status.Usage.UsedCPU = usedCPU
		infra.Status.Usage.UsedMemoryGB = usedMem
		infra.Status.Usage.UsedDiskGB = usedDisk

		l.Info("Updating Infra Quota Usage", "Infra", infra.Name, "CPU", usedCPU, "Mem", usedMem)
		if err := r.Status().Update(ctx, infra); err != nil {
			// Conflict or transient API errors are retried by the manager's
			// default backoff on the returned error.
			return ctrl.Result{}, err
		}
	}

	// 5. Verify connectivity (optional): we could check that the
	// ProviderRef exists here and set Ready=true.
	return ctrl.Result{}, nil
}

// usageForInfra sums the estimated CPU cores, memory (GB) and disk (GB)
// consumed by every cluster whose InfraBlueprintRef matches infraName.
// Control-plane consumption is estimated from the standard defaults above;
// worker-pool consumption is taken from the pool spec's explicit sizing.
func usageForInfra(infraName string, clusters []rigv1.ClusterBlueprint) (cpu, mem, disk int) {
	for i := range clusters {
		cluster := &clusters[i]
		// Only count clusters that belong to THIS Infra.
		if cluster.Spec.InfraBlueprintRef != infraName {
			continue
		}

		// Control plane: node count depends on HA, per-node sizing is the
		// shared default (the cluster does not yet report real consumption).
		nodes := singleControlPlaneNodes
		if cluster.Spec.ControlPlaneHA {
			nodes = haControlPlaneNodes
		}
		cpu += nodes * defaultNodeCPUCores
		mem += nodes * defaultNodeMemoryGB
		disk += nodes * defaultNodeDiskGB

		// Worker pools declare their sizing explicitly in the spec.
		for _, pool := range cluster.Spec.WorkerPools {
			cpu += pool.Quantity * pool.CpuCores
			mem += pool.Quantity * pool.MemoryGB
			disk += pool.Quantity * pool.DiskGB
		}
	}
	return cpu, mem, disk
}

// SetupWithManager sets up the controller with the Manager.
// Besides owning InfraBlueprints, it watches ClusterBlueprints: when a
// Cluster is added/modified/deleted, the Infra it references must be
// re-reconciled so its usage accounting stays current.
func (r *InfraBlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&rigv1.InfraBlueprint{}).
		Watches(
			&rigv1.ClusterBlueprint{},
			handler.EnqueueRequestsFromMapFunc(r.findInfraForCluster),
		).
		Complete(r)
}

// findInfraForCluster maps a Cluster change event to a Reconcile request for
// its parent Infra. Returns nil (no requests) for non-Cluster objects or
// clusters with no InfraBlueprintRef set.
func (r *InfraBlueprintReconciler) findInfraForCluster(ctx context.Context, obj client.Object) []reconcile.Request {
	cluster, ok := obj.(*rigv1.ClusterBlueprint)
	if !ok {
		return nil
	}
	if cluster.Spec.InfraBlueprintRef == "" {
		return nil
	}
	// Infra is assumed to live in the same namespace as the cluster that
	// references it (same tenancy assumption as Reconcile).
	return []reconcile.Request{
		{
			NamespacedName: types.NamespacedName{
				Name:      cluster.Spec.InfraBlueprintRef,
				Namespace: cluster.Namespace,
			},
		},
	}
}