package harvester

import (
	"context"
	"encoding/base64"
	"fmt"
	"time"

	authenticationv1 "k8s.io/api/authentication/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	k8sprovisionerv1alpha1 "vanderlande.com/appstack/k8s-provisioner/api/v1alpha1"
)

// TryCleanup performs a best-effort cleanup of Harvester resources. Errors are
// logged and swallowed so that deprovisioning is never blocked.
func TryCleanup(ctx context.Context, k8sClient client.Client, infraRefName, namespace, saName string) {
	l := log.FromContext(ctx)

	// 1. Fetch Infra
	var infra k8sprovisionerv1alpha1.Infra
	if err := k8sClient.Get(ctx, types.NamespacedName{Name: infraRefName, Namespace: namespace}, &infra); err != nil {
		l.Info("Cleanup skipped: Infra object not found")
		return
	}

	vmNamespace := infra.Spec.VmNamespace
	if vmNamespace == "" {
		vmNamespace = "default"
	}

	// 2. Fetch Master Credential
	rancherCredName := infra.Spec.CloudCredentialSecret
	var rancherSecret corev1.Secret
	if err := k8sClient.Get(ctx, types.NamespacedName{Name: rancherCredName, Namespace: "cattle-global-data"}, &rancherSecret); err != nil {
		l.Info("Cleanup skipped: Master Credential Secret not found")
		return
	}

	// 3. Extract Kubeconfig
	var kubeBytes []byte
	if len(rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]) > 0 {
		kubeBytes = rancherSecret.Data["harvestercredentialConfig-kubeconfigContent"]
	} else if len(rancherSecret.Data["credential"]) > 0 {
		kubeBytes = rancherSecret.Data["credential"]
	} else {
		l.Info("Cleanup skipped: no kubeconfig found in Master Credential Secret")
		return
	}

	// 4. Cleanup
	if err := deleteHarvesterResources(ctx, kubeBytes, saName, vmNamespace); err != nil {
		l.Error(err, "Failed to cleanup Harvester resources (ignoring)")
	} else {
		l.Info("Harvester resources deleted successfully")
	}
}

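// A hypothetical call site (illustrative only; names like cluster.Spec.InfraRef
// and r.Client are assumptions, not part of this package):
//
//	harvester.TryCleanup(ctx, r.Client, cluster.Spec.InfraRef, cluster.Namespace, saName)
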
// deleteHarvesterResources removes the ServiceAccount and RBAC bindings that
// EnsureCredential created on the Harvester cluster. NotFound errors are
// tolerated so the helper stays idempotent.
func deleteHarvesterResources(ctx context.Context, masterKubeconfig []byte, serviceAccountName, vmNamespace string) error {
	restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig)
	if err != nil {
		return err
	}
	hvClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return err
	}

	deletePolicy := metav1.DeletePropagationBackground
	deleteOpts := metav1.DeleteOptions{PropagationPolicy: &deletePolicy}

	// 1. Delete Global CSI Binding (ClusterRoleBinding)
	csiBindingName := fmt.Sprintf("%s-csi-binding", serviceAccountName)
	err = hvClient.RbacV1().ClusterRoleBindings().Delete(ctx, csiBindingName, deleteOpts)
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	// 2. Delete Cloud Provider Binding (RoleBinding in VM Namespace)
	cpBindingName := fmt.Sprintf("%s-cloud-binding", serviceAccountName)
	err = hvClient.RbacV1().RoleBindings(vmNamespace).Delete(ctx, cpBindingName, deleteOpts)
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	// 3. Delete ServiceAccount (VM Namespace)
	err = hvClient.CoreV1().ServiceAccounts(vmNamespace).Delete(ctx, serviceAccountName, deleteOpts)
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	return nil
}

// EnsureCredential mints a dedicated ServiceAccount (plus RBAC bindings and a
// long-lived token) in the given VM namespace on the Harvester cluster, and
// returns a ready-to-apply kubeconfig Secret, the ServiceAccount name, and the
// token expiry time.
func EnsureCredential(ctx context.Context, masterKubeconfig []byte, clusterName, targetNamespace, vmNamespace, harvesterURL string) (*corev1.Secret, string, time.Time, error) {
	// --- PHASE 1: Connect (Proxy/Master Config) ---
	restConfig, err := clientcmd.RESTConfigFromKubeConfig(masterKubeconfig)
	if err != nil {
		return nil, "", time.Time{}, fmt.Errorf("invalid rancher cloud credential kubeconfig: %w", err)
	}
	hvClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return nil, "", time.Time{}, err
	}

	// --- PHASE 2: Create Identity (SA & Bindings) ---
	if vmNamespace == "" {
		vmNamespace = "default"
	}
	saName := fmt.Sprintf("prov-%s", clusterName)

	// A. Create ServiceAccount
	sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: vmNamespace}}
	if _, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return nil, "", time.Time{}, err
		}
	}

	// B. Create RoleBinding (VM Namespace)
	rb := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: saName + "-cloud-binding", Namespace: vmNamespace},
		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}},
		RoleRef:    rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:cloudprovider", APIGroup: "rbac.authorization.k8s.io"},
	}
	if _, err := hvClient.RbacV1().RoleBindings(vmNamespace).Create(ctx, rb, metav1.CreateOptions{}); err != nil {
		// AlreadyExists is expected on re-runs; any other error is fatal.
		if !apierrors.IsAlreadyExists(err) {
			return nil, "", time.Time{}, err
		}
	}

	// C. Create ClusterRoleBinding (Global)
	crb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: saName + "-csi-binding"},
		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: vmNamespace}},
		RoleRef:    rbacv1.RoleRef{Kind: "ClusterRole", Name: "harvesterhci.io:csi-driver", APIGroup: "rbac.authorization.k8s.io"},
	}
	if _, err := hvClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}); err != nil {
		// AlreadyExists is expected on re-runs; any other error is fatal.
		if !apierrors.IsAlreadyExists(err) {
			return nil, "", time.Time{}, err
		}
	}

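	// Both RoleRefs above point at ClusterRoles ("harvesterhci.io:cloudprovider",
	// "harvesterhci.io:csi-driver") that are assumed to ship with Harvester; the
	// cloud-provider grant is scoped to the VM namespace via the RoleBinding,
	// while the CSI binding is cluster-wide.
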
	// D. Mint Token
	ttlSeconds := int64(315360000) // 10 years (3650 days)
	tokenRequest, err := hvClient.CoreV1().ServiceAccounts(vmNamespace).CreateToken(ctx, saName, &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{ExpirationSeconds: &ttlSeconds},
	}, metav1.CreateOptions{})
	if err != nil {
		return nil, "", time.Time{}, fmt.Errorf("failed to mint harvester token: %w", err)
	}
	expiryTime := time.Now().Add(time.Duration(ttlSeconds) * time.Second)

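	// Note: the API server may clamp the requested TTL, so computing expiry
	// from ttlSeconds can overestimate. The authoritative value is in the
	// TokenRequest status; a stricter variant (sketch) would be:
	//
	//	expiryTime := tokenRequest.Status.ExpirationTimestamp.Time
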
	// --- PHASE 3: Determine URL & CA ---

	// 1. URL: Use the explicitly provided HarvesterURL
	if harvesterURL == "" {
		// Safety net: fall back to the proxy host if the caller did not set it
		harvesterURL = restConfig.Host
	}

	// 2. CA: Fetch the internal Harvester CA.
	// (Required because the proxy CA won't match the direct IP/URL.)
	harvesterCA := restConfig.CAData

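	// Kubernetes (v1.21+) publishes the cluster CA as the "kube-root-ca.crt"
	// ConfigMap (key "ca.crt") in every namespace; that is what is read below.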
	caConfigMap, err := hvClient.CoreV1().ConfigMaps("default").Get(ctx, "kube-root-ca.crt", metav1.GetOptions{})
	if err == nil {
		if caStr, ok := caConfigMap.Data["ca.crt"]; ok {
			harvesterCA = []byte(caStr)
		}
	}

	// --- PHASE 4: Construct Kubeconfig ---
	caData := base64.StdEncoding.EncodeToString(harvesterCA)
	token := tokenRequest.Status.Token

	// The indentation inside the raw string is significant YAML: "namespace"
	// belongs to the context map, alongside "cluster" and "user".
	newKubeconfig := fmt.Sprintf(`apiVersion: v1
kind: Config
clusters:
- name: harvester
  cluster:
    server: %s
    certificate-authority-data: %s
users:
- name: provisioner
  user:
    token: %s
contexts:
- name: default
  context:
    cluster: harvester
    user: provisioner
    namespace: %s
current-context: default
`, harvesterURL, caData, token, vmNamespace)

	// --- PHASE 5: Create Secret ---
	secretName := fmt.Sprintf("harvesterconfig-%s", clusterName)

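	// The annotations below follow Rancher's v2prov convention: the first marks
	// the Secret as authorized for this cluster, and the second (as understood
	// here) lets Rancher delete it when the cluster is removed.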
	secret := &corev1.Secret{
		TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: targetNamespace,
			Annotations: map[string]string{
				"v2prov-secret-authorized-for-cluster":                clusterName,
				"v2prov-authorized-secret-deletes-on-cluster-removal": "true",
			},
			Labels: map[string]string{
				"cattle.io/creator": "k8s-provisioner",
			},
		},
		Type: corev1.SecretTypeOpaque,
		StringData: map[string]string{
			"credential": newKubeconfig,
		},
	}

	return secret, saName, expiryTime, nil
}
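
// A minimal sketch of a caller (hypothetical wiring; kubeBytes would come from
// the Rancher cloud credential Secret, as in TryCleanup above):
//
//	secret, saName, expiry, err := EnsureCredential(ctx, kubeBytes,
//	    "demo-cluster", "fleet-default", "default", "https://harvester.example.com:6443")
//	if err != nil {
//	    return err
//	}
//	// Apply the Secret with a controller-runtime client, then record saName
//	// and expiry on the owning object for later cleanup and token rotation.
//	_ = k8sClient.Create(ctx, secret)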