Fix a garbage collection bug

- Previously the golang code created a Service and an APIService at
  runtime. The APIService was given an owner reference pointing to
  the namespace in which the app was installed.
- This prevented the app from being uninstalled. The namespace would
  refuse to finish deleting, so `kapp delete` or `kubectl delete`
  would fail.
- The new approach is to statically define both the Service and the
  APIService in deployment.yaml, except for the caBundle of the
  APIService. The golang code then performs an update at runtime to
  fill in the caBundle (see the sketch after this list).
- When the user runs `kapp deploy` or `kubectl apply`, either tool will
  notice that the caBundle is not declared in the yaml and will
  therefore leave that field alone.
- When the user runs `kapp delete` or `kubectl delete`, either tool will
  delete the objects because they are statically declared by name in
  the yaml, just like all of the other objects. No ownerReferences are
  used, so nothing should prevent the namespace from being deleted.
- This approach also leaves us with less golang code to maintain.
- In the future, if our golang controllers want to dynamically add
  an Ingress or other objects, they can still do that. An Ingress
  would point to our statically defined Service as its backend.
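
To make the runtime step concrete, here is a minimal sketch of the "fill in only the caBundle" pattern described above. This is illustration only: the package, function, and variable names are invented, and the real implementation is the UpdateAPIService function in the autoregistration diff below.

```go
package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
	aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
)

// updateCABundle fetches the APIService that deployment.yaml already
// created and updates only its caBundle, retrying on write conflicts.
// Every other field stays exactly as declared in the yaml.
func updateCABundle(ctx context.Context, client aggregatorclient.Interface, apiServiceName string, caBundle []byte) error {
	apiServices := client.ApiregistrationV1().APIServices()
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Re-fetch the latest version before each attempt so that a
		// conflict from a concurrent writer can be resolved on retry.
		existing, err := apiServices.Get(ctx, apiServiceName, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("could not get existing APIService: %w", err)
		}
		existing.Spec.CABundle = caBundle
		_, err = apiServices.Update(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
```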

Signed-off-by: Andrew Keesler <akeesler@vmware.com>
Author:    Ryan Richard
Date:      2020-08-04 16:46:27 -07:00
Committer: Andrew Keesler
Parent:    92939cf118
Commit:    08961919b5
4 changed files with 126 additions and 615 deletions


@@ -102,3 +102,36 @@ spec:
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: v1
kind: Service
metadata:
name: placeholder-name-api #! the golang code assumes this specific name as part of the common name during cert generation
namespace: #@ data.values.namespace
labels:
app: #@ data.values.app_name
spec:
type: ClusterIP
selector:
app: #@ data.values.app_name
ports:
- protocol: TCP
port: 443
targetPort: 443
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1alpha1.placeholder.suzerain-io.github.io
labels:
app: #@ data.values.app_name
spec:
version: v1alpha1
group: placeholder.suzerain-io.github.io
groupPriorityMinimum: 2500 #! TODO what is the right value? https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#apiservicespec-v1beta1-apiregistration-k8s-io
versionPriority: 10 #! TODO what is the right value? https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#apiservicespec-v1beta1-apiregistration-k8s-io
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: placeholder-name-api
namespace: #@ data.values.namespace
port: 443
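
Note that the APIService name declared above is not arbitrary: the aggregation layer and the golang code both expect the conventional `<version>.<group>` name. A minimal sketch of how that name is derived, assuming the group/version constants from the placeholder v1alpha1 API package used elsewhere in this commit:

```go
package main

import (
	"fmt"

	placeholderv1alpha1 "github.com/suzerain-io/placeholder-name-api/pkg/apis/placeholder/v1alpha1"
)

func main() {
	// APIService objects are named "<version>.<group>", so the statically
	// declared object in deployment.yaml must match the name that the
	// runtime code computes from these constants.
	name := placeholderv1alpha1.SchemeGroupVersion.Version + "." + placeholderv1alpha1.GroupName
	fmt.Println(name) // prints: v1alpha1.placeholder.suzerain-io.github.io
}
```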


@@ -3,151 +3,37 @@ Copyright 2020 VMware, Inc.
SPDX-License-Identifier: Apache-2.0
*/
// Package autoregistration registers a Kubernetes APIService pointing at the current pod.
// Package autoregistration updates the pre-registered APIService.
package autoregistration
import (
"context"
"errors"
"fmt"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
aggregatationv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
placeholderv1alpha1 "github.com/suzerain-io/placeholder-name-api/pkg/apis/placeholder/v1alpha1"
)
// ErrInvalidServiceTemplate is returned by Setup when the provided ServiceTemplate is not valid.
var ErrInvalidServiceTemplate = errors.New("invalid service template")
// SetupOptions specifies the inputs for Setup().
type SetupOptions struct {
CoreV1 corev1client.CoreV1Interface
AggregationV1 aggregatationv1client.Interface
Namespace string
ServiceTemplate corev1.Service
APIServiceTemplate apiregistrationv1.APIService
}
// Setup registers a Kubernetes Service, and an aggregation APIService which points to it.
func Setup(ctx context.Context, options SetupOptions) error {
// Get the namespace so we can use its UID set owner references on other objects.
ns, err := options.CoreV1.Namespaces().Get(ctx, options.Namespace, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("could not get namespace: %w", err)
}
// runtime.WithoutVersionDecoder clears the GVK set on the namespace because Clayton ... 😒
// https://github.com/kubernetes/kubernetes/pull/26251/files#diff-71b26e1e133ec6d3c4da26366b6502acR360-R361
// I think this is legacy cruft from the internal rest clients combined with using the same codec
// in places where implicit conversion occurs from some external version to an internal or different external version
// i.e. the type meta we saw on the wire may not match the type meta of the struct in all cases
// however, in our case, we know that we directly called the rest API at a particular version and that no conversion occurred
// thus we know that the type meta we saw on the wire directly matches the struct that we are using
// said in a different way, we know that our GVR to GVK (and vice versa) mapping is 1:1
// this means we can recover the type meta by asking the Kube client-go scheme for it
// the below code will only error if some generated Kube client-go code is broken
gvks, _, err := scheme.Scheme.ObjectKinds(ns)
if err != nil || len(gvks) == 0 {
return fmt.Errorf("could not get GVK: %w", err)
}
gvk := gvks[0]
apiVersion, kind := gvk.ToAPIVersionAndKind()
// Make a copy of the Service template.
svc := options.ServiceTemplate.DeepCopy()
svc.Namespace = ns.Name
// Validate that the Service meets our expectations.
if len(svc.Spec.Ports) != 1 {
return fmt.Errorf("%w: must have 1 port (found %d)", ErrInvalidServiceTemplate, len(svc.Spec.Ports))
}
if port := svc.Spec.Ports[0]; port.Protocol != corev1.ProtocolTCP || port.Port != 443 {
return fmt.Errorf("%w: must expose TCP/443 (found %s/%d)", ErrInvalidServiceTemplate, port.Protocol, port.Port)
}
// Create or update the Service.
if err := createOrUpdateService(ctx, options.CoreV1, svc); err != nil {
return err
}
apiSvc := options.APIServiceTemplate.DeepCopy()
apiSvc.Spec.Service = &apiregistrationv1.ServiceReference{
Namespace: ns.Name,
Name: svc.Name,
Port: &svc.Spec.Ports[0].Port,
}
apiSvc.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{
APIVersion: apiVersion,
Kind: kind,
UID: ns.UID,
Name: ns.Name,
}}
if err := createOrUpdateAPIService(ctx, options.AggregationV1, apiSvc); err != nil {
return err
}
return nil
}
func createOrUpdateService(ctx context.Context, client corev1client.CoreV1Interface, svc *corev1.Service) error {
services := client.Services(svc.Namespace)
_, err := services.Create(ctx, svc, metav1.CreateOptions{})
if err == nil {
return nil
}
if !k8serrors.IsAlreadyExists(err) {
return fmt.Errorf("could not create service: %w", err)
}
// UpdateAPIService updates the APIService's CA bundle.
func UpdateAPIService(ctx context.Context, aggregationV1 aggregatationv1client.Interface, aggregatedAPIServerCA []byte) error {
apiServices := aggregationV1.ApiregistrationV1().APIServices()
apiServiceName := placeholderv1alpha1.SchemeGroupVersion.Version + "." + placeholderv1alpha1.GroupName
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of the Service before attempting update
// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
result, err := services.Get(ctx, svc.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("could not get existing version of service: %w", err)
}
// Update just the fields we care about.
result.Spec.Ports = svc.Spec.Ports
result.Spec.Selector = svc.Spec.Selector
_, updateErr := services.Update(ctx, result, metav1.UpdateOptions{})
return updateErr
}); err != nil {
return fmt.Errorf("could not update service: %w", err)
}
return nil
}
func createOrUpdateAPIService(ctx context.Context, client aggregatationv1client.Interface, apiSvc *apiregistrationv1.APIService) error {
apiServices := client.ApiregistrationV1().APIServices()
_, err := apiServices.Create(ctx, apiSvc, metav1.CreateOptions{})
if err == nil {
return nil
}
if !k8serrors.IsAlreadyExists(err) {
return fmt.Errorf("could not create API service: %w", err)
}
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Retrieve the latest version of the Service before attempting update
// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
result, err := apiServices.Get(ctx, apiSvc.Name, metav1.GetOptions{})
// Retrieve the latest version of the Service before attempting update.
// RetryOnConflict uses exponential backoff to avoid exhausting the API server.
fetchedAPIService, err := apiServices.Get(ctx, apiServiceName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("could not get existing version of API service: %w", err)
}
// Update just the fields we care about.
apiSvc.Spec.DeepCopyInto(&result.Spec)
apiSvc.OwnerReferences = result.OwnerReferences
// Update just the field we care about.
fetchedAPIService.Spec.CABundle = aggregatedAPIServerCA
_, updateErr := apiServices.Update(ctx, result, metav1.UpdateOptions{})
_, updateErr := apiServices.Update(ctx, fetchedAPIService, metav1.UpdateOptions{})
return updateErr
}); err != nil {
return fmt.Errorf("could not update API service: %w", err)


@@ -11,507 +11,136 @@ import (
"testing"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/apimachinery/pkg/runtime/schema"
kubetesting "k8s.io/client-go/testing"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
aggregationv1fake "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake"
"k8s.io/utils/pointer"
)
func TestSetup(t *testing.T) {
tests := []struct {
name string
input SetupOptions
mocks func(*kubefake.Clientset, *aggregationv1fake.Clientset)
wantErr string
wantServices []corev1.Service
wantAPIServices []apiregistrationv1.APIService
}{
{
name: "no such namespace",
input: SetupOptions{
Namespace: "foo",
},
wantErr: `could not get namespace: namespaces "foo" not found`,
},
{
name: "service template missing port",
input: SetupOptions{
Namespace: "test-namespace",
},
mocks: func(kube *kubefake.Clientset, agg *aggregationv1fake.Clientset) {
_ = kube.Tracker().Add(&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "test-namespace", UID: "test-namespace-uid"},
})
},
wantErr: `invalid service template: must have 1 port (found 0)`,
},
{
name: "service template missing port",
input: SetupOptions{
Namespace: "test-namespace",
ServiceTemplate: corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service",
Namespace: "replaceme",
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Protocol: "UDP",
Port: 1234,
TargetPort: intstr.IntOrString{IntVal: 1234},
},
},
},
},
},
mocks: func(kube *kubefake.Clientset, agg *aggregationv1fake.Clientset) {
_ = kube.Tracker().Add(&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "test-namespace", UID: "test-namespace-uid"},
})
},
wantErr: `invalid service template: must expose TCP/443 (found UDP/1234)`,
},
{
name: "fail to create service",
input: SetupOptions{
Namespace: "test-namespace",
ServiceTemplate: corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service",
Namespace: "replaceme",
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Protocol: "TCP",
Port: 443,
TargetPort: intstr.IntOrString{IntVal: 1234},
},
},
},
},
},
mocks: func(kube *kubefake.Clientset, agg *aggregationv1fake.Clientset) {
_ = kube.Tracker().Add(&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "test-namespace", UID: "test-namespace-uid"},
})
kube.PrependReactor("create", "services", func(_ kubetesting.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("some Service creation failure")
})
},
wantErr: `could not create service: some Service creation failure`,
},
{
name: "fail to create API service",
input: SetupOptions{
Namespace: "test-namespace",
ServiceTemplate: corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service",
Namespace: "replaceme",
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Protocol: "TCP",
Port: 443,
TargetPort: intstr.IntOrString{IntVal: 1234},
},
},
},
},
},
mocks: func(kube *kubefake.Clientset, agg *aggregationv1fake.Clientset) {
_ = kube.Tracker().Add(&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "test-namespace", UID: "test-namespace-uid"},
})
agg.PrependReactor("create", "apiservices", func(_ kubetesting.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("some APIService creation failure")
})
},
wantErr: `could not create API service: some APIService creation failure`,
},
{
name: "success",
input: SetupOptions{
Namespace: "test-namespace",
ServiceTemplate: corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service",
Namespace: "replaceme",
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Protocol: "TCP",
Port: 443,
TargetPort: intstr.IntOrString{IntVal: 1234},
},
},
},
},
APIServiceTemplate: apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "test-api-service"},
Spec: apiregistrationv1.APIServiceSpec{
Group: "test-api-group",
Version: "test-version",
CABundle: []byte("test-ca-bundle"),
GroupPriorityMinimum: 1234,
VersionPriority: 4321,
},
},
},
mocks: func(kube *kubefake.Clientset, agg *aggregationv1fake.Clientset) {
_ = kube.Tracker().Add(&corev1.Namespace{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Namespace"},
ObjectMeta: metav1.ObjectMeta{Name: "test-namespace", UID: "test-namespace-uid"},
})
},
wantServices: []corev1.Service{{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service",
Namespace: "test-namespace",
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Protocol: "TCP",
Port: 443,
TargetPort: intstr.IntOrString{IntVal: 1234},
},
},
},
}},
wantAPIServices: []apiregistrationv1.APIService{{
ObjectMeta: metav1.ObjectMeta{
Name: "test-api-service",
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "v1",
Kind: "Namespace",
Name: "test-namespace",
UID: "test-namespace-uid",
}},
},
Spec: apiregistrationv1.APIServiceSpec{
Service: &apiregistrationv1.ServiceReference{
Namespace: "test-namespace",
Name: "test-service",
Port: pointer.Int32Ptr(443),
},
Group: "test-api-group",
Version: "test-version",
CABundle: []byte("test-ca-bundle"),
GroupPriorityMinimum: 1234,
VersionPriority: 4321,
},
}},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
func TestUpdateAPIService(t *testing.T) {
const apiServiceName = "v1alpha1.placeholder.suzerain-io.github.io"
kubeClient := kubefake.NewSimpleClientset()
aggregationClient := aggregationv1fake.NewSimpleClientset()
if tt.mocks != nil {
tt.mocks(kubeClient, aggregationClient)
}
tt.input.CoreV1 = kubeClient.CoreV1()
tt.input.AggregationV1 = aggregationClient
err := Setup(context.Background(), tt.input)
if tt.wantErr != "" {
require.EqualError(t, err, tt.wantErr)
return
}
require.NoError(t, err)
if tt.wantServices != nil {
objects, err := kubeClient.CoreV1().Services(tt.input.Namespace).List(ctx, metav1.ListOptions{})
require.NoError(t, err)
require.Equal(t, tt.wantServices, objects.Items)
}
if tt.wantAPIServices != nil {
objects, err := aggregationClient.ApiregistrationV1().APIServices().List(ctx, metav1.ListOptions{})
require.NoError(t, err)
require.Equal(t, tt.wantAPIServices, objects.Items)
}
})
}
}
func TestCreateOrUpdateService(t *testing.T) {
tests := []struct {
name string
input *corev1.Service
mocks func(*kubefake.Clientset)
wantObjects []corev1.Service
wantErr string
}{
{
name: "error on create",
input: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
},
mocks: func(c *kubefake.Clientset) {
c.PrependReactor("create", "services", func(_ kubetesting.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("error on create")
})
},
wantErr: "could not create service: error on create",
},
{
name: "new",
input: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
},
wantObjects: []corev1.Service{{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
}},
},
{
name: "update",
mocks: func(c *kubefake.Clientset) {
_ = c.Tracker().Add(&corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
})
},
input: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
},
wantObjects: []corev1.Service{{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
}},
},
{
name: "error on get",
mocks: func(c *kubefake.Clientset) {
_ = c.Tracker().Add(&corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
})
c.PrependReactor("get", "services", func(_ kubetesting.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("error on get")
})
},
input: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
},
wantErr: "could not update service: could not get existing version of service: error on get",
},
{
name: "error on get, successful retry",
mocks: func(c *kubefake.Clientset) {
_ = c.Tracker().Add(&corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
})
hit := false
c.PrependReactor("get", "services", func(_ kubetesting.Action) (bool, runtime.Object, error) {
// Return an error on the first call, then fall through to the default (successful) response.
if !hit {
hit = true
return true, nil, fmt.Errorf("error on get")
}
return false, nil, nil
})
},
input: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "1.2.3.4",
},
},
wantErr: "could not update service: could not get existing version of service: error on get",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
client := kubefake.NewSimpleClientset()
if tt.mocks != nil {
tt.mocks(client)
}
err := createOrUpdateService(ctx, client.CoreV1(), tt.input)
if tt.wantErr != "" {
require.EqualError(t, err, tt.wantErr)
return
}
require.NoError(t, err)
if tt.wantObjects != nil {
objects, err := client.CoreV1().Services(tt.input.ObjectMeta.Namespace).List(ctx, metav1.ListOptions{})
require.NoError(t, err)
require.Equal(t, tt.wantObjects, objects.Items)
}
})
}
}
func TestCreateOrUpdateAPIService(t *testing.T) {
tests := []struct {
name string
input *apiregistrationv1.APIService
mocks func(*aggregationv1fake.Clientset)
caInput []byte
wantObjects []apiregistrationv1.APIService
wantErr string
}{
{
name: "error on create",
input: &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 123,
VersionPriority: 456,
},
},
mocks: func(c *aggregationv1fake.Clientset) {
c.PrependReactor("create", "apiservices", func(_ kubetesting.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("error on create")
})
},
wantErr: "could not create API service: error on create",
},
{
name: "new",
input: &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 123,
VersionPriority: 456,
},
},
wantObjects: []apiregistrationv1.APIService{{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 123,
VersionPriority: 456,
},
}},
},
{
name: "update",
name: "happy path update when the pre-existing APIService did not already have a CA bundle",
mocks: func(c *aggregationv1fake.Clientset) {
_ = c.Tracker().Add(&apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 999,
VersionPriority: 999,
CABundle: nil,
},
})
},
input: &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 123,
VersionPriority: 456,
},
},
caInput: []byte("some-ca-bundle"),
wantObjects: []apiregistrationv1.APIService{{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 123,
VersionPriority: 456,
GroupPriorityMinimum: 999,
CABundle: []byte("some-ca-bundle"),
},
}},
},
{
name: "happy path update when the pre-existing APIService already had a CA bundle",
mocks: func(c *aggregationv1fake.Clientset) {
_ = c.Tracker().Add(&apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 999,
CABundle: []byte("some-other-different-ca-bundle"),
},
})
},
caInput: []byte("some-ca-bundle"),
wantObjects: []apiregistrationv1.APIService{{
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 999,
CABundle: []byte("some-ca-bundle"),
},
}},
},
{
name: "error on update",
mocks: func(c *aggregationv1fake.Clientset) {
_ = c.Tracker().Add(&apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{},
})
c.PrependReactor("update", "apiservices", func(_ kubetesting.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("error on update")
})
},
wantErr: "could not update API service: error on update",
},
{
name: "error on get",
mocks: func(c *aggregationv1fake.Clientset) {
_ = c.Tracker().Add(&apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 999,
VersionPriority: 999,
},
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{},
})
c.PrependReactor("get", "apiservices", func(_ kubetesting.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("error on get")
})
},
input: &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 123,
VersionPriority: 456,
},
},
caInput: []byte("some-ca-bundle"),
wantErr: "could not update API service: could not get existing version of API service: error on get",
},
{
name: "error on get, successful retry",
name: "conflict error on update, followed by successful retry",
mocks: func(c *aggregationv1fake.Clientset) {
_ = c.Tracker().Add(&apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 999,
VersionPriority: 999,
GroupPriorityMinimum: 111,
CABundle: nil,
},
})
hit := false
c.PrependReactor("get", "apiservices", func(_ kubetesting.Action) (bool, runtime.Object, error) {
c.PrependReactor("update", "apiservices", func(_ kubetesting.Action) (bool, runtime.Object, error) {
// Return an error on the first call, then fall through to the default (successful) response.
if !hit {
// Before the update fails, also change the object that will be returned by the next Get(),
// to make sure that the production code does a fresh Get() after detecting a conflict.
_ = c.Tracker().Update(schema.GroupVersionResource{
Group: apiregistrationv1.GroupName,
Version: apiregistrationv1.SchemeGroupVersion.Version,
Resource: "apiservices",
}, &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 222,
CABundle: nil,
},
}, "")
hit = true
return true, nil, fmt.Errorf("error on get")
return true, nil, apierrors.NewConflict(schema.GroupResource{
Group: apiregistrationv1.GroupName,
Resource: "apiservices",
}, apiServiceName, fmt.Errorf("there was a conflict"))
}
return false, nil, nil
})
},
input: &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
caInput: []byte("some-ca-bundle"),
wantObjects: []apiregistrationv1.APIService{{
ObjectMeta: metav1.ObjectMeta{Name: apiServiceName},
Spec: apiregistrationv1.APIServiceSpec{
GroupPriorityMinimum: 123,
VersionPriority: 456,
GroupPriorityMinimum: 222,
CABundle: []byte("some-ca-bundle"),
},
},
wantErr: "could not update API service: could not get existing version of API service: error on get",
}},
},
}
for _, tt := range tests {
@@ -524,7 +153,7 @@ func TestCreateOrUpdateAPIService(t *testing.T) {
tt.mocks(client)
}
err := createOrUpdateAPIService(ctx, client, tt.input)
err := UpdateAPIService(ctx, client, tt.caInput)
if tt.wantErr != "" {
require.EqualError(t, err, tt.wantErr)
return


@@ -17,10 +17,7 @@ import (
"time"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/dynamiccertificates"
genericoptions "k8s.io/apiserver/pkg/server/options"
@@ -28,7 +25,6 @@ import (
k8sinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
aggregationv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"github.com/suzerain-io/controller-go"
@@ -188,14 +184,14 @@ func (a *App) run(
// TODO use the postStart hook to generate certs?
apiCA, err := certauthority.New(pkix.Name{CommonName: "Placeholder CA"})
aggregatedAPIServerCA, err := certauthority.New(pkix.Name{CommonName: "Placeholder CA"})
if err != nil {
return fmt.Errorf("could not initialize CA: %w", err)
}
const serviceName = "placeholder-name-api"
cert, err := apiCA.Issue(
cert, err := aggregatedAPIServerCA.Issue(
pkix.Name{CommonName: serviceName + "." + serverInstallationNamespace + ".svc"},
[]string{},
24*365*time.Hour,
@@ -204,40 +200,7 @@
return fmt.Errorf("could not issue serving certificate: %w", err)
}
// Dynamically register our v1alpha1 API service.
service := corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: serviceName},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Protocol: corev1.ProtocolTCP,
Port: 443,
TargetPort: intstr.IntOrString{IntVal: 443},
},
},
Selector: podinfo.Labels,
Type: corev1.ServiceTypeClusterIP,
},
}
apiService := apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{
Name: placeholderv1alpha1.SchemeGroupVersion.Version + "." + placeholderv1alpha1.GroupName,
},
Spec: apiregistrationv1.APIServiceSpec{
Group: placeholderv1alpha1.GroupName,
Version: placeholderv1alpha1.SchemeGroupVersion.Version,
CABundle: apiCA.Bundle(),
GroupPriorityMinimum: 2500, // TODO what is the right value? https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#apiservicespec-v1beta1-apiregistration-k8s-io
VersionPriority: 10, // TODO what is the right value? https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#apiservicespec-v1beta1-apiregistration-k8s-io
},
}
if err := autoregistration.Setup(ctx, autoregistration.SetupOptions{
CoreV1: k8sClient.CoreV1(),
AggregationV1: aggregationClient,
Namespace: serverInstallationNamespace,
ServiceTemplate: service,
APIServiceTemplate: apiService,
}); err != nil {
if err := autoregistration.UpdateAPIService(ctx, aggregationClient, aggregatedAPIServerCA.Bundle()); err != nil {
return fmt.Errorf("could not register API service: %w", err)
}