From dfcc2a1eb851214f0cac23e24e2219c44674750e Mon Sep 17 00:00:00 2001
From: Margo Crawford
Date: Fri, 5 Feb 2021 17:01:39 -0800
Subject: [PATCH] Introduce clusterhost package to determine whether a cluster
 has control plane nodes

Also added hasExternalLoadBalancerProvider key to cluster capabilities
for integration testing.

Signed-off-by: Ryan Richard
---
 internal/clusterhost/clusterhost.go      |  63 +++++++++
 internal/clusterhost/clusterhost_test.go | 169 +++++++++++++++++++++++
 internal/concierge/server/server.go      |  36 +++++
 test/cluster_capabilities/gke.yaml       |   5 +-
 test/cluster_capabilities/kind.yaml      |   5 +-
 test/cluster_capabilities/tkgs.yaml      |   5 +-
 test/library/env.go                      |   3 +-
 7 files changed, 282 insertions(+), 4 deletions(-)
 create mode 100644 internal/clusterhost/clusterhost.go
 create mode 100644 internal/clusterhost/clusterhost_test.go

diff --git a/internal/clusterhost/clusterhost.go b/internal/clusterhost/clusterhost.go
new file mode 100644
index 00000000..bfdbd6c7
--- /dev/null
+++ b/internal/clusterhost/clusterhost.go
@@ -0,0 +1,63 @@
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package clusterhost
+
+import (
+	"context"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+const (
+	labelNodeRolePrefix  = "node-role.kubernetes.io/"
+	nodeLabelRole        = "kubernetes.io/node-role"
+	controlPlaneNodeRole = "control-plane"
+	// this role was deprecated by kubernetes 1.20.
+	masterNodeRole = "master"
+)
+
+type ClusterHost struct {
+	client kubernetes.Interface
+}
+
+func New(client kubernetes.Interface) *ClusterHost {
+	return &ClusterHost{client: client}
+}
+
+func (c *ClusterHost) HasControlPlaneNodes(ctx context.Context) (bool, error) {
+	nodes, err := c.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return false, fmt.Errorf("error fetching nodes: %v", err)
+	}
+	if len(nodes.Items) == 0 {
+		return false, fmt.Errorf("no nodes found")
+	}
+	for _, node := range nodes.Items {
+		for k, v := range node.Labels {
+			if isControlPlaneNodeRole(k, v) {
+				return true, nil
+			}
+		}
+	}
+
+	return false, nil
+}
+
+func isControlPlaneNodeRole(k string, v string) bool {
+	if k == labelNodeRolePrefix+controlPlaneNodeRole {
+		return true
+	}
+	if k == labelNodeRolePrefix+masterNodeRole {
+		return true
+	}
+	if k == nodeLabelRole && v == controlPlaneNodeRole {
+		return true
+	}
+	if k == nodeLabelRole && v == masterNodeRole {
+		return true
+	}
+	return false
+}
diff --git a/internal/clusterhost/clusterhost_test.go b/internal/clusterhost/clusterhost_test.go
new file mode 100644
index 00000000..11405096
--- /dev/null
+++ b/internal/clusterhost/clusterhost_test.go
@@ -0,0 +1,169 @@
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package clusterhost
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/stretchr/testify/require"
+	"k8s.io/apimachinery/pkg/runtime"
+	kubernetesfake "k8s.io/client-go/kubernetes/fake"
+	coretesting "k8s.io/client-go/testing"
+
+	v1 "k8s.io/api/core/v1"
+)
+
+func TestHasControlPlaneNodes(t *testing.T) {
+	tests := []struct {
+		name            string
+		nodes           []*v1.Node
+		listNodesErr    error
+		wantErr         error
+		wantReturnValue bool
+	}{
+		{
+			name:         "Fetching nodes returns an error",
+			listNodesErr: errors.New("couldn't get nodes"),
+			wantErr:      errors.New("error fetching nodes: couldn't get nodes"),
+		},
+		{
+			name:    "Fetching nodes returns an empty array",
+			nodes:   []*v1.Node{},
+			wantErr: errors.New("no nodes found"),
+		},
+		{
+			name: "Nodes found, but not control plane nodes",
+			nodes: []*v1.Node{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node-1",
+						Labels: map[string]string{
+							"not-control-plane-label": "some-value",
+							"kubernetes.io/node-role": "worker",
+						},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "node-2",
+						Labels: map[string]string{"node-role.kubernetes.io/worker": ""},
+					},
+				},
+			},
+			wantReturnValue: false,
+		},
+		{
+			name: "Nodes found, including a control-plane role in node-role.kubernetes.io/ format",
+			nodes: []*v1.Node{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "node-1",
+						Labels: map[string]string{"unrelated-label": "some-value"},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node-2",
+						Labels: map[string]string{
+							"some-other-label":                      "some-value",
+							"node-role.kubernetes.io/control-plane": "",
+						},
+					},
+				},
+			},
+			wantReturnValue: true,
+		},
+		{
+			name: "Nodes found, including a master role in node-role.kubernetes.io/ format",
+			nodes: []*v1.Node{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "node-1",
+						Labels: map[string]string{"unrelated-label": "some-value"},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node-2",
+						Labels: map[string]string{
+							"some-other-label":               "some-value",
+							"node-role.kubernetes.io/master": "",
+						},
+					},
+				},
+			},
+			wantReturnValue: true,
+		},
+		{
+			name: "Nodes found, including a control-plane role in kubernetes.io/node-role= format",
+			nodes: []*v1.Node{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "node-1",
+						Labels: map[string]string{"unrelated-label": "some-value"},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node-2",
+						Labels: map[string]string{
+							"some-other-label":        "some-value",
+							"kubernetes.io/node-role": "control-plane",
+						},
+					},
+				},
+			},
+			wantReturnValue: true,
+		},
+		{
+			name: "Nodes found, including a master role in kubernetes.io/node-role= format",
+			nodes: []*v1.Node{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "node-1",
+						Labels: map[string]string{"unrelated-label": "some-value"},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node-2",
+						Labels: map[string]string{
+							"some-other-label":        "some-value",
+							"kubernetes.io/node-role": "master",
+						},
+					},
+				},
+			},
+			wantReturnValue: true,
+		},
+	}
+	for _, tt := range tests {
+		test := tt
+		t.Run(test.name, func(t *testing.T) {
+			kubeClient := kubernetesfake.NewSimpleClientset()
+			if test.listNodesErr != nil {
+				listNodesErr := test.listNodesErr
+				kubeClient.PrependReactor(
+					"list",
+					"nodes",
+					func(_ coretesting.Action) (bool, runtime.Object, error) {
+						return true, nil, listNodesErr
+					},
+				)
+			}
+			for _, node := range test.nodes {
+				err := kubeClient.Tracker().Add(node)
+				require.NoError(t, err)
+			}
+			clusterHost := New(kubeClient)
+			hasControlPlaneNodes, err := clusterHost.HasControlPlaneNodes(context.Background())
+			require.Equal(t, test.wantErr, err)
+			require.Equal(t, test.wantReturnValue, hasControlPlaneNodes)
+		})
+	}
+}
diff --git a/internal/concierge/server/server.go b/internal/concierge/server/server.go
index 9b22ba28..dbf94dc2 100644
--- a/internal/concierge/server/server.go
+++ b/internal/concierge/server/server.go
@@ -13,6 +13,12 @@ import (
 	"net/http"
 	"time"
+	"k8s.io/apimachinery/pkg/util/intstr"
+
+	v1 "k8s.io/api/core/v1"
+
+	"go.pinniped.dev/internal/kubeclient"
+
 
 	"github.com/spf13/cobra"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -169,6 +175,35 @@ func (a *App) runServer(ctx context.Context) error {
 		return fmt.Errorf("could not create aggregated API server: %w", err)
 	}
 
+	client, err := kubeclient.New()
+	if err != nil {
+		plog.WarningErr("could not create client", err)
+	} else {
+		appNameLabel := cfg.Labels["app"]
+		loadBalancer := v1.Service{
+			Spec: v1.ServiceSpec{
+				Type: "LoadBalancer",
+				Ports: []v1.ServicePort{
+					{
+						TargetPort: intstr.FromInt(8444),
+						Port:       443,
+						Protocol:   v1.ProtocolTCP,
+					},
+				},
+				Selector: map[string]string{"app": appNameLabel},
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "impersonation-proxy-load-balancer",
+				Namespace: podInfo.Namespace,
+				Labels:    cfg.Labels,
+			},
+		}
+		_, err = client.Kubernetes.CoreV1().Services(podInfo.Namespace).Create(ctx, &loadBalancer, metav1.CreateOptions{})
+		if err != nil {
+			plog.WarningErr("could not create load balancer", err)
+		}
+	}
+
 	// run proxy handler
 	impersonationCA, err := certauthority.New(pkix.Name{CommonName: "test CA"}, 24*time.Hour)
 	if err != nil {
@@ -191,6 +226,7 @@ func (a *App) runServer(ctx context.Context) error {
 			Certificates: []tls.Certificate{*impersonationCert},
 		},
 	}
+	// todo store CA, cert etc. on the authenticator status
 	go func() {
 		if err := impersonationProxyServer.ListenAndServeTLS("", ""); err != nil {
 			klog.ErrorS(err, "could not serve impersonation proxy")
diff --git a/test/cluster_capabilities/gke.yaml b/test/cluster_capabilities/gke.yaml
index 4852280d..2f97168e 100644
--- a/test/cluster_capabilities/gke.yaml
+++ b/test/cluster_capabilities/gke.yaml
@@ -1,4 +1,4 @@
-# Copyright 2020 the Pinniped contributors. All Rights Reserved.
+# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
 # Describe the capabilities of the cluster against which the integration tests will run.
@@ -6,3 +6,6 @@
 capabilities:
   # Is it possible to borrow the cluster's signing key from the kube API server?
   clusterSigningKeyIsAvailable: false
+
+  # Will the cluster successfully provision a load balancer if requested?
+  hasExternalLoadBalancerProvider: true
diff --git a/test/cluster_capabilities/kind.yaml b/test/cluster_capabilities/kind.yaml
index c81f6687..ba9099fa 100644
--- a/test/cluster_capabilities/kind.yaml
+++ b/test/cluster_capabilities/kind.yaml
@@ -1,4 +1,4 @@
-# Copyright 2020 the Pinniped contributors. All Rights Reserved.
+# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
 # Describe the capabilities of the cluster against which the integration tests will run.
@@ -6,3 +6,6 @@
 capabilities:
   # Is it possible to borrow the cluster's signing key from the kube API server?
   clusterSigningKeyIsAvailable: true
+
+  # Will the cluster successfully provision a load balancer if requested?
+  hasExternalLoadBalancerProvider: false
diff --git a/test/cluster_capabilities/tkgs.yaml b/test/cluster_capabilities/tkgs.yaml
index c81f6687..a45b92b3 100644
--- a/test/cluster_capabilities/tkgs.yaml
+++ b/test/cluster_capabilities/tkgs.yaml
@@ -1,4 +1,4 @@
-# Copyright 2020 the Pinniped contributors. All Rights Reserved.
+# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
 # Describe the capabilities of the cluster against which the integration tests will run.
@@ -6,3 +6,6 @@
 capabilities:
   # Is it possible to borrow the cluster's signing key from the kube API server?
   clusterSigningKeyIsAvailable: true
+
+  # Will the cluster successfully provision a load balancer if requested?
+  hasExternalLoadBalancerProvider: true
diff --git a/test/library/env.go b/test/library/env.go
index 8fa53a93..ada5ee48 100644
--- a/test/library/env.go
+++ b/test/library/env.go
@@ -18,7 +18,8 @@ import (
 type Capability string
 
 const (
-	ClusterSigningKeyIsAvailable Capability = "clusterSigningKeyIsAvailable"
+	ClusterSigningKeyIsAvailable    Capability = "clusterSigningKeyIsAvailable"
+	HasExternalLoadBalancerProvider Capability = "hasExternalLoadBalancerProvider"
 )
 
 // TestEnv captures all the external parameters consumed by our integration tests.