Drop initial code

Danny Bessems
2026-01-15 09:58:01 +00:00
parent 227d957219
commit 1e7c9ba5cb
228 changed files with 19883 additions and 1 deletion

View File

@@ -0,0 +1,176 @@
package harvester
import (
"context"
"encoding/base64"
"fmt"
"time"
authenticationv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// DeleteCredentialResources connects to Harvester and removes the cluster-specific ServiceAccount and its role bindings
func DeleteCredentialResources(ctx context.Context, masterKubeconfig []byte, serviceAccountName, vmNamespace string) error {
restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig)
if err != nil {
return err
}
hvClient, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return err
}
deletePolicy := metav1.DeletePropagationBackground
deleteOpts := metav1.DeleteOptions{PropagationPolicy: &deletePolicy}
// 1. Delete Global CSI Binding (ClusterRoleBinding)
csiBindingName := fmt.Sprintf("%s-csi-binding", serviceAccountName)
// We ignore NotFound errors to make this idempotent
if err := hvClient.RbacV1().ClusterRoleBindings().Delete(ctx, csiBindingName, deleteOpts); err != nil && !apierrors.IsNotFound(err) {
return err
}
// 2. Delete Cloud Provider Binding (RoleBinding in VM Namespace)
cpBindingName := fmt.Sprintf("%s-cloud-binding", serviceAccountName)
if err := hvClient.RbacV1().RoleBindings(vmNamespace).Delete(ctx, cpBindingName, deleteOpts); err != nil && !apierrors.IsNotFound(err) {
return err
}
// 3. Delete ServiceAccount (VM Namespace)
if err := hvClient.CoreV1().ServiceAccounts(vmNamespace).Delete(ctx, serviceAccountName, deleteOpts); err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}
// EnsureCredential mints a dedicated ServiceAccount in the given VM namespace, binds it to the Harvester cloud-provider and CSI ClusterRoles, requests a long-lived token, and returns a kubeconfig Secret for it
func EnsureCredential(ctx context.Context, masterKubeconfig []byte, clusterName, targetNamespace, vmNamespace, harvesterURL string) (*corev1.Secret, string, time.Time, error) {
// --- PHASE 1: Connect ---
restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig)
if err != nil {
return nil, "", time.Time{}, fmt.Errorf("invalid rancher cloud credential kubeconfig: %w", err)
}
hvClient, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, "", time.Time{}, err
}
// --- PHASE 2: Create Identity ---
if vmNamespace == "" {
vmNamespace = "default"
}
saName := fmt.Sprintf("prov-%s", clusterName)
// A. Create ServiceAccount
sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: vmNamespace}}
if _, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil {
if !apierrors.IsAlreadyExists(err) {
return nil, "", time.Time{}, err
}
}
// B. Create RoleBinding (Cloud Provider)
rb := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: saName + "-cloud-binding", Namespace: vmNamespace},
Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:cloudprovider", APIGroup: "rbac.authorization.k8s.io"},
}
if _, err := hvClient.RbacV1().RoleBindings(vmNamespace).Create(ctx, rb, metav1.CreateOptions{}); err != nil {
if !apierrors.IsAlreadyExists(err) {
return nil, "", time.Time{}, err
}
}
// C. Create ClusterRoleBinding (CSI Driver)
crb := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: saName + "-csi-binding"},
Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}},
RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:csi-driver", APIGroup: "rbac.authorization.k8s.io"},
}
if _, err := hvClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}); err != nil {
if !apierrors.IsAlreadyExists(err) {
return nil, "", time.Time{}, err
}
}
// D. Mint Token
ttlSeconds := int64(315360000) // ~10 years
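// NOTE: the API server may cap ExpirationSeconds (--service-account-max-token-expiration),
// so the issued token can be shorter-lived than requested.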
tokenRequest, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).CreateToken(ctx, saName, &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{ExpirationSeconds: &ttlSeconds},
}, metav1.CreateOptions{})
if err != nil {
return nil, "", time.Time{}, fmt.Errorf("failed to mint harvester token: %w", err)
}
// Use the expiry reported by the API server rather than recomputing it locally
expiryTime := tokenRequest.Status.ExpirationTimestamp.Time
// --- PHASE 3: Determine URL & CA ---
if harvesterURL == "" {
harvesterURL = restConfig.Host
}
// Fetch internal CA (required because proxy CA != internal CA)
harvesterCA := restConfig.CAData
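// kube-controller-manager publishes the cluster CA into a "kube-root-ca.crt" ConfigMap
// in every namespace (Kubernetes >= 1.21), so reading it from "default" yields the in-cluster CA.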
caConfigMap, err := hvClient.CoreV1().ConfigMaps("default").Get(ctx, "kube-root-ca.crt", metav1.GetOptions{})
if err == nil {
if caStr, ok := caConfigMap.Data["ca.crt"]; ok {
harvesterCA = []byte(caStr)
}
}
// --- PHASE 4: Construct Kubeconfig ---
caData := base64.StdEncoding.EncodeToString(harvesterCA)
token := tokenRequest.Status.Token
newKubeconfig := fmt.Sprintf(
`apiVersion: v1
kind: Config
clusters:
- name: harvester
  cluster:
    server: %s
    certificate-authority-data: %s
users:
- name: provisioner
  user:
    token: %s
contexts:
- name: default
  context:
    cluster: harvester
    user: provisioner
    namespace: %s
current-context: default
`, harvesterURL, caData, token, vmNamespace)
// --- PHASE 5: Create Secret Object ---
secretName := fmt.Sprintf("harvesterconfig-%s", clusterName)
secret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: targetNamespace,
Annotations: map[string]string{
// [CRITICAL] These annotations authorize the guest cluster to use this secret
"v2prov-secret-authorized-for-cluster": clusterName,
"v2prov-authorized-secret-deletes-on-cluster-removal": "true",
},
Labels: map[string]string{
"cattle.io/creator": "rig-operator", // Updated creator
"rig.appstack.io/cluster": clusterName,
},
},
Type: "Opaque",
StringData: map[string]string{
"credential": newKubeconfig,
},
}
return secret, saName, expiryTime, nil
}
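
Not part of this commit: a minimal smoke-test sketch for the output of EnsureCredential. It builds a client from the kubeconfig written into the Secret's "credential" key and calls the version endpoint, which only proves the minted token authenticates, not that the cloud-provider/CSI RBAC bindings work. The function name is hypothetical.

package harvester

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// verifyMintedCredential parses the kubeconfig embedded in the Secret returned
// by EnsureCredential and calls /version to confirm the token is accepted.
func verifyMintedCredential(secret *corev1.Secret) error {
	cfg, err := clientcmd.RESTConfigFromKubeConfig([]byte(secret.StringData["credential"]))
	if err != nil {
		return fmt.Errorf("generated kubeconfig is not parseable: %w", err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	if _, err := cs.Discovery().ServerVersion(); err != nil {
		return fmt.Errorf("minted token was rejected by harvester: %w", err)
	}
	return nil
}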

View File

@@ -0,0 +1,126 @@
package harvester
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
"vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
)
type IdentityManager struct {
client client.Client
scheme *runtime.Scheme
}
func NewIdentityManager(c client.Client, s *runtime.Scheme) *IdentityManager {
return &IdentityManager{client: c, scheme: s}
}
// Ensure checks if an identity exists. If not, it fetches master creds, mints a new one, and updates Status.
func (m *IdentityManager) Ensure(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, hbp *v1alpha1.HarvesterBlueprint) (string, error) {
l := log.FromContext(ctx)
// 1. Fast Path: If identity already exists in Status, return it
if cbp.Status.Identity != nil && cbp.Status.Identity.SecretRef != "" {
return cbp.Status.Identity.SecretRef, nil
}
l.Info("Minting Harvester identity", "Cluster", cbp.Name)
// 2. Fetch Master Credential (from Infra)
rancherCredName := ibp.Spec.CloudCredentialSecret
if rancherCredName == "" {
return "", fmt.Errorf("CloudCredentialSecret is missing in InfraBlueprint %s", ibp.Name)
}
var rancherSecret corev1.Secret
// Note: Rancher secrets are expected in cattle-global-data
if err := m.client.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil {
return "", fmt.Errorf("failed to fetch rancher credential %s: %w", rancherCredName, err)
}
// 3. Extract Kubeconfig
const kubeconfigKey = "harvestercredentialConfig-kubeconfigContent"
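// "harvestercredentialConfig-kubeconfigContent" is the key Rancher stores Harvester
// cloud-credential kubeconfigs under; "credential" (checked below) matches the key
// written by EnsureCredential and is used as a fallback.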
adminKubeconfigBytes := rancherSecret.Data[kubeconfigKey]
if len(adminKubeconfigBytes) == 0 {
if len(rancherSecret.Data["credential"]) > 0 {
adminKubeconfigBytes = rancherSecret.Data["credential"]
} else {
return "", fmt.Errorf("secret %s missing kubeconfig data", rancherCredName)
}
}
// 4. Call Factory (low-level)
newSecret, saName, _, err := EnsureCredential(
ctx,
adminKubeconfigBytes,
cbp.Name,
cbp.Namespace, // Target Namespace (where secret goes)
hbp.Spec.VmNamespace, // Harvester Namespace (where VM goes)
hbp.Spec.HarvesterURL, // Explicit URL from HBP
)
if err != nil {
return "", fmt.Errorf("failed to mint harvester credential: %w", err)
}
// 5. Persist Secret
// Set OwnerRef so if CBP is deleted, Secret is deleted automatically
if err := controllerutil.SetControllerReference(cbp, newSecret, m.scheme); err != nil {
return "", err
}
patchOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("rig-operator")}
if err := m.client.Patch(ctx, newSecret, client.Apply, patchOpts...); err != nil {
return "", fmt.Errorf("failed to patch new secret: %w", err)
}
// 6. Update CBP Status
// We do this here so the identity is "locked" to the object immediately
if cbp.Status.Identity == nil {
cbp.Status.Identity = &v1alpha1.IdentityStatus{}
}
cbp.Status.Identity.SecretRef = newSecret.Name
cbp.Status.Identity.ServiceAccount = saName
if err := m.client.Status().Update(ctx, cbp); err != nil {
return "", fmt.Errorf("failed to update cluster status: %w", err)
}
return newSecret.Name, nil
}
// Cleanup removes the ServiceAccount from Harvester when the Cluster is deleted
func (m *IdentityManager) Cleanup(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, ibp *v1alpha1.InfraBlueprint, hbp *v1alpha1.HarvesterBlueprint) {
if cbp.Status.Identity == nil || cbp.Status.Identity.ServiceAccount == "" {
return
}
// Fetch Master Secret again to get connection details
rancherCredName := ibp.Spec.CloudCredentialSecret
var rancherSecret corev1.Secret
if err := m.client.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil {
log.FromContext(ctx).V(1).Info("Cleanup: could not fetch master credential secret, skipping Harvester cleanup", "error", err)
return
}
var kubeBytes []byte
if len(rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]) > 0 {
kubeBytes = rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]
} else if len(rancherSecret.Data["credential"]) > 0 {
kubeBytes = rancherSecret.Data["credential"]
} else {
return
}
// Delegate to low-level cleanup
if err := DeleteCredentialResources(ctx, kubeBytes, cbp.Status.Identity.ServiceAccount, hbp.Spec.VmNamespace); err != nil {
log.FromContext(ctx).Error(err, "Failed to cleanup Harvester resources (best effort)")
}
}
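
For orientation only: a sketch of the Identity status fields this manager reads and writes. The real api/v1alpha1 definitions are not part of this diff, so the JSON tags are assumptions inferred from the code above.

// Assumed shape, inferred from Ensure/Cleanup above — not the actual API type.
type IdentityStatus struct {
	// SecretRef names the generated harvesterconfig Secret in the ClusterBlueprint's namespace.
	SecretRef string `json:"secretRef,omitempty"`
	// ServiceAccount names the ServiceAccount minted on the Harvester side.
	ServiceAccount string `json:"serviceAccount,omitempty"`
}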

View File

@@ -0,0 +1,140 @@
package harvester
import (
"context"
"fmt"
"vanderlande.com/ittp/appstack/rig-operator/api/v1alpha1"
template "vanderlande.com/ittp/appstack/rig-operator/internal/templates/harvester"
)
// harvesterNodePool matches the exact JSON structure required by the Helm Chart
type harvesterNodePool struct {
Name string `json:"name"`
DisplayName string `json:"displayName"`
Quantity int `json:"quantity"`
Etcd bool `json:"etcd"`
ControlPlane bool `json:"controlplane"`
Worker bool `json:"worker"`
Paused bool `json:"paused"`
// Harvester Specific Fields
CpuCount int `json:"cpuCount"`
MemorySize int `json:"memorySize"` // GB
DiskSize int `json:"diskSize"` // GB
ImageName string `json:"imageName"`
NetworkName string `json:"networkName"`
SshUser string `json:"sshUser"`
VmNamespace string `json:"vmNamespace"`
UserData string `json:"userData"`
}
type Strategy struct {
blueprint *v1alpha1.HarvesterBlueprint
userData string
rancherURL string
defaults template.Defaults
}
// NewStrategy initializes the strategy with defaults and optional overrides
func NewStrategy(hbp *v1alpha1.HarvesterBlueprint, infraUserData string, infraRancherURL string, defaults template.Defaults) *Strategy {
// Determine UserData priority: Infra (IBP) > Template Default
finalUserData := infraUserData
if finalUserData == "" {
finalUserData = defaults.UserData
}
return &Strategy{
blueprint: hbp,
userData: finalUserData,
rancherURL: infraRancherURL,
defaults: defaults,
}
}
// GenerateNodePools implements provider.Strategy
func (s *Strategy) GenerateNodePools(ctx context.Context, cbp *v1alpha1.ClusterBlueprint) (interface{}, error) {
var pools []interface{}
// Helper to map generic req -> harvester specific struct
mapPool := func(name string, qty, cpu, memGB, diskGB int, isEtcd, isCp, isWk bool) harvesterNodePool {
return harvesterNodePool{
Name: name,
DisplayName: name,
Quantity: qty,
Etcd: isEtcd,
ControlPlane: isCp,
Worker: isWk,
Paused: false,
// Mapping: Generic (GB) -> Harvester (GB) [No conversion needed]
CpuCount: cpu,
MemorySize: memGB,
DiskSize: diskGB,
// Harvester Specifics from HBP
ImageName: s.blueprint.Spec.ImageName,
NetworkName: s.blueprint.Spec.NetworkName,
SshUser: s.blueprint.Spec.SshUser,
VmNamespace: s.blueprint.Spec.VmNamespace,
UserData: s.userData,
}
}
// 1. Control Plane Pool
cpQty := 1
if cbp.Spec.ControlPlaneHA {
cpQty = 3
}
// Use Defaults from YAML for CP sizing
pools = append(pools, mapPool(
"cp-pool",
cpQty,
s.defaults.CP_CPU,
s.defaults.CP_Mem,
s.defaults.CP_Disk,
true, true, false,
))
// 2. Worker Pools
for _, wp := range cbp.Spec.WorkerPools {
pools = append(pools, mapPool(
wp.Name,
wp.Quantity,
wp.CpuCores,
wp.MemoryGB,
wp.DiskGB,
false, false, true,
))
}
return pools, nil
}
// GetGlobalOverrides implements provider.Strategy
func (s *Strategy) GetGlobalOverrides(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecretName string) (map[string]interface{}, error) {
// secret://<namespace>:<secretName>
secretURI := fmt.Sprintf("secret://%s:%s", cbp.Namespace, credentialSecretName)
overrides := map[string]interface{}{
"cloud_provider_name": "harvester",
"cloud_provider_config": secretURI,
// Inject Rancher URL
"rancher": map[string]interface{}{
"cattle": map[string]interface{}{
"url": s.rancherURL,
},
},
"chartValues": map[string]interface{}{
"harvester-cloud-provider": map[string]interface{}{
"global": map[string]interface{}{
"cattle": map[string]interface{}{
"clusterName": cbp.Name,
},
},
},
},
}
return overrides, nil
}
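
The two methods above are tagged as implementing provider.Strategy; that package is not part of this diff, so the following interface is an assumed sketch reconstructed from the method signatures, not the real definition.

// Assumed shape of provider.Strategy, inferred from the method comments above.
type Strategy interface {
	GenerateNodePools(ctx context.Context, cbp *v1alpha1.ClusterBlueprint) (interface{}, error)
	GetGlobalOverrides(ctx context.Context, cbp *v1alpha1.ClusterBlueprint, credentialSecretName string) (map[string]interface{}, error)
}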