Drop initial code
deploy/rig-operator/internal/provider/harvester/manager.go (new file, +126 lines)
@@ -0,0 +1,126 @@
package harvester

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/log"

	"vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)

type IdentityManager struct {
	client client.Client
	scheme *runtime.Scheme
}

func NewIdentityManager(c client.Client, s *runtime.Scheme) *IdentityManager {
	return &IdentityManager{client: c, scheme: s}
}

// Ensure checks if an identity exists. If not, it fetches master creds, mints a new one, and updates Status.
func (m *IdentityManager) Ensure(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, hbp *v1alpha1.HarvesterBlueprint) (string, error) {
	l := log.FromContext(ctx)

	// 1. Fast Path: If identity already exists in Status, return it
	if cbp.Status.Identity != nil && cbp.Status.Identity.SecretRef != "" {
		return cbp.Status.Identity.SecretRef, nil
	}

	l.Info("Minting Harvester identity", "Cluster", cbp.Name)

	// 2. Fetch Master Credential (from Infra)
	rancherCredName := ibp.Spec.CloudCredentialSecret
	if rancherCredName == "" {
		return "", fmt.Errorf("CloudCredentialSecret is missing in InfraBlueprint %s", ibp.Name)
	}

	var rancherSecret corev1.Secret
	// Note: Rancher secrets are expected in cattle-global-data
	if err := m.client.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil {
		return "", fmt.Errorf("failed to fetch rancher credential %s: %w", rancherCredName, err)
	}

	// 3. Extract Kubeconfig
	const kubeconfigKey = "harvestercredentialConfig-kubeconfigContent"
	adminKubeconfigBytes := rancherSecret.Data[kubeconfigKey]
	if len(adminKubeconfigBytes) == 0 {
		if len(rancherSecret.Data["credential"]) > 0 {
			adminKubeconfigBytes = rancherSecret.Data["credential"]
		} else {
			return "", fmt.Errorf("secret %s missing kubeconfig data", rancherCredName)
		}
	}

	// 4. Call Factory (low-level)
	newSecret, saName, _, err := EnsureCredential(
		ctx,
		adminKubeconfigBytes,
		cbp.Name,
		cbp.Namespace,         // Target Namespace (where secret goes)
		hbp.Spec.VmNamespace,  // Harvester Namespace (where VM goes)
		hbp.Spec.HarvesterURL, // Explicit URL from HBP
	)
	if err != nil {
		return "", fmt.Errorf("failed to mint harvester credential: %w", err)
	}

	// 5. Persist Secret
	// Set OwnerRef so if CBP is deleted, Secret is deleted automatically
	if err := controllerutil.SetControllerReference(cbp, newSecret, m.scheme); err != nil {
		return "", err
	}

	patchOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("rig-operator")}
	if err := m.client.Patch(ctx, newSecret, client.Apply, patchOpts...); err != nil {
		return "", fmt.Errorf("failed to patch new secret: %w", err)
	}

	// 6. Update CBP Status
	// We do this here so the identity is "locked" to the object immediately
	if cbp.Status.Identity == nil {
		cbp.Status.Identity = &v1alpha1.IdentityStatus{}
	}
	cbp.Status.Identity.SecretRef = newSecret.Name
	cbp.Status.Identity.ServiceAccount = saName

	if err := m.client.Status().Update(ctx, cbp); err != nil {
		return "", fmt.Errorf("failed to update cluster status: %w", err)
	}

	return newSecret.Name, nil
}

// Cleanup removes the ServiceAccount from Harvester when the Cluster is deleted
func (m *IdentityManager) Cleanup(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, hbp *v1alpha1.HarvesterBlueprint) {
	if cbp.Status.Identity == nil || cbp.Status.Identity.ServiceAccount == "" {
		return
	}

	// Fetch Master Secret again to get connection details
	rancherCredName := ibp.Spec.CloudCredentialSecret
	var rancherSecret corev1.Secret
	if err := m.client.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil {
		log.FromContext(ctx).V(1).Info("Cleanup: Could not fetch master secret (connection lost), skipping manual cleanup")
		return
	}

	var kubeBytes []byte
	if len(rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]) > 0 {
		kubeBytes = rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]
	} else if len(rancherSecret.Data["credential"]) > 0 {
		kubeBytes = rancherSecret.Data["credential"]
	} else {
		return
	}

	// Delegate to low-level cleanup
	if err := DeleteCredentialResources(ctx, kubeBytes, cbp.Status.Identity.ServiceAccount, hbp.Spec.VmNamespace); err != nil {
		log.FromContext(ctx).Error(err, "Failed to cleanup Harvester resources (best effort)")
	}
}
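For context, a minimal sketch of how a reconciler might call this manager. The reconciler type, its Client/Scheme fields, and the helper name below are assumptions for illustration and are not part of this commit:

	// Hypothetical wiring from a ClusterBlueprint reconciler (names assumed).
	func (r *ClusterBlueprintReconciler) reconcileIdentity(
		ctx context.Context,
		cbp *v1alpha1.ClusterBlueprint,
		ibp *v1alpha1.InfraBlueprint,
		hbp *v1alpha1.HarvesterBlueprint,
	) (string, error) {
		// Construct the manager with the controller's client and scheme.
		idm := harvester.NewIdentityManager(r.Client, r.Scheme)

		if !cbp.DeletionTimestamp.IsZero() {
			// On deletion, best-effort removal of the minted ServiceAccount in Harvester.
			idm.Cleanup(ctx, cbp, ibp, hbp)
			return "", nil
		}

		// Ensure is idempotent: it short-circuits on Status.Identity.SecretRef and
		// otherwise mints a scoped credential from the Rancher master secret.
		secretRef, err := idm.Ensure(ctx, cbp, ibp, hbp)
		if err != nil {
			return "", fmt.Errorf("ensuring harvester identity: %w", err)
		}
		return secretRef, nil
	}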