Introduce clusterhost package to determine whether a cluster has control plane nodes
Also added the hasExternalLoadBalancerProvider key to the cluster capabilities for integration testing.

Signed-off-by: Ryan Richard <richardry@vmware.com>
parent 812f5084a1
commit dfcc2a1eb8
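As a quick orientation before the diff, here is a minimal, hypothetical sketch of how the new package is meant to be called. The surrounding function, the kubeClient parameter, and the printed messages are illustrative assumptions rather than code from this commit.

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"

	"go.pinniped.dev/internal/clusterhost"
)

// describeCluster is a hypothetical caller: it asks the new package whether any
// node carries a control-plane (or legacy master) role label.
func describeCluster(ctx context.Context, kubeClient kubernetes.Interface) error {
	hasControlPlane, err := clusterhost.New(kubeClient).HasControlPlaneNodes(ctx)
	if err != nil {
		return fmt.Errorf("could not inspect nodes: %w", err)
	}
	if hasControlPlane {
		fmt.Println("control plane nodes are visible to this cluster's API")
	} else {
		fmt.Println("no control plane nodes are visible (e.g. a managed control plane)")
	}
	return nil
}
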
internal/clusterhost/clusterhost.go (new file, 63 lines)

@@ -0,0 +1,63 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package clusterhost provides a way to determine whether a cluster has any control plane nodes.
package clusterhost

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

const (
	labelNodeRolePrefix  = "node-role.kubernetes.io/"
	nodeLabelRole        = "kubernetes.io/node-role"
	controlPlaneNodeRole = "control-plane"
	// This role was deprecated by Kubernetes 1.20 in favor of the control-plane role.
	masterNodeRole = "master"
)

// ClusterHost answers questions about the cluster reachable through the given client.
type ClusterHost struct {
	client kubernetes.Interface
}

func New(client kubernetes.Interface) *ClusterHost {
	return &ClusterHost{client: client}
}

// HasControlPlaneNodes returns true if any node in the cluster carries a
// control-plane (or legacy master) role label, and false otherwise.
func (c *ClusterHost) HasControlPlaneNodes(ctx context.Context) (bool, error) {
	nodes, err := c.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("error fetching nodes: %v", err)
	}
	if len(nodes.Items) == 0 {
		return false, fmt.Errorf("no nodes found")
	}
	for _, node := range nodes.Items {
		for k, v := range node.Labels {
			if isControlPlaneNodeRole(k, v) {
				return true, nil
			}
		}
	}

	return false, nil
}

// isControlPlaneNodeRole recognizes both the node-role.kubernetes.io/<role> label key
// format and the kubernetes.io/node-role=<role> label value format.
func isControlPlaneNodeRole(k string, v string) bool {
	if k == labelNodeRolePrefix+controlPlaneNodeRole {
		return true
	}
	if k == labelNodeRolePrefix+masterNodeRole {
		return true
	}
	if k == nodeLabelRole && v == controlPlaneNodeRole {
		return true
	}
	if k == nodeLabelRole && v == masterNodeRole {
		return true
	}
	return false
}

internal/clusterhost/clusterhost_test.go (new file, 169 lines)

@@ -0,0 +1,169 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package clusterhost

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"
	coretesting "k8s.io/client-go/testing"
)

func TestHasControlPlaneNodes(t *testing.T) {
	tests := []struct {
		name            string
		nodes           []*v1.Node
		listNodesErr    error
		wantErr         error
		wantReturnValue bool
	}{
		{
			name:         "Fetching nodes returns an error",
			listNodesErr: errors.New("couldn't get nodes"),
			wantErr:      errors.New("error fetching nodes: couldn't get nodes"),
		},
		{
			name:    "Fetching nodes returns an empty array",
			nodes:   []*v1.Node{},
			wantErr: errors.New("no nodes found"),
		},
		{
			name: "Nodes found, but no control plane nodes",
			nodes: []*v1.Node{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "node-1",
						Labels: map[string]string{
							"not-control-plane-label": "some-value",
							"kubernetes.io/node-role": "worker",
						},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "node-2",
						Labels: map[string]string{"node-role.kubernetes.io/worker": ""},
					},
				},
			},
			wantReturnValue: false,
		},
		{
			name: "Nodes found, including a control-plane role in node-role.kubernetes.io/<role> format",
			nodes: []*v1.Node{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "node-1",
						Labels: map[string]string{"unrelated-label": "some-value"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "node-2",
						Labels: map[string]string{
							"some-other-label":                      "some-value",
							"node-role.kubernetes.io/control-plane": "",
						},
					},
				},
			},
			wantReturnValue: true,
		},
		{
			name: "Nodes found, including a master role in node-role.kubernetes.io/<role> format",
			nodes: []*v1.Node{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "node-1",
						Labels: map[string]string{"unrelated-label": "some-value"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "node-2",
						Labels: map[string]string{
							"some-other-label":               "some-value",
							"node-role.kubernetes.io/master": "",
						},
					},
				},
			},
			wantReturnValue: true,
		},
		{
			name: "Nodes found, including a control-plane role in kubernetes.io/node-role=<role> format",
			nodes: []*v1.Node{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "node-1",
						Labels: map[string]string{"unrelated-label": "some-value"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "node-2",
						Labels: map[string]string{
							"some-other-label":        "some-value",
							"kubernetes.io/node-role": "control-plane",
						},
					},
				},
			},
			wantReturnValue: true,
		},
		{
			name: "Nodes found, including a master role in kubernetes.io/node-role=<role> format",
			nodes: []*v1.Node{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "node-1",
						Labels: map[string]string{"unrelated-label": "some-value"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "node-2",
						Labels: map[string]string{
							"some-other-label":        "some-value",
							"kubernetes.io/node-role": "master",
						},
					},
				},
			},
			wantReturnValue: true,
		},
	}
	for _, tt := range tests {
		test := tt
		t.Run(test.name, func(t *testing.T) {
			kubeClient := kubernetesfake.NewSimpleClientset()
			if test.listNodesErr != nil {
				listNodesErr := test.listNodesErr
				kubeClient.PrependReactor(
					"list",
					"nodes",
					func(_ coretesting.Action) (bool, runtime.Object, error) {
						return true, nil, listNodesErr
					},
				)
			}
			for _, node := range test.nodes {
				err := kubeClient.Tracker().Add(node)
				require.NoError(t, err)
			}
			clusterHost := New(kubeClient)
			hasControlPlaneNodes, err := clusterHost.HasControlPlaneNodes(context.Background())
			require.Equal(t, test.wantErr, err)
			require.Equal(t, test.wantReturnValue, hasControlPlaneNodes)
		})
	}
}

@@ -13,6 +13,12 @@ import (
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/intstr"

	v1 "k8s.io/api/core/v1"

	"go.pinniped.dev/internal/kubeclient"

	"github.com/spf13/cobra"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
@@ -169,6 +175,35 @@ func (a *App) runServer(ctx context.Context) error {
		return fmt.Errorf("could not create aggregated API server: %w", err)
	}

	client, err := kubeclient.New()
	if err != nil {
		plog.WarningErr("could not create client", err)
	} else {
		appNameLabel := cfg.Labels["app"]
		loadBalancer := v1.Service{
			Spec: v1.ServiceSpec{
				Type: "LoadBalancer",
				Ports: []v1.ServicePort{
					{
						TargetPort: intstr.FromInt(8444),
						Port:       443,
						Protocol:   v1.ProtocolTCP,
					},
				},
				Selector: map[string]string{"app": appNameLabel},
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      "impersonation-proxy-load-balancer",
				Namespace: podInfo.Namespace,
				Labels:    cfg.Labels,
			},
		}
		_, err = client.Kubernetes.CoreV1().Services(podInfo.Namespace).Create(ctx, &loadBalancer, metav1.CreateOptions{})
		if err != nil {
			plog.WarningErr("could not create load balancer", err)
		}
	}

	// run proxy handler
	impersonationCA, err := certauthority.New(pkix.Name{CommonName: "test CA"}, 24*time.Hour)
	if err != nil {
@@ -191,6 +226,7 @@ func (a *App) runServer(ctx context.Context) error {
			Certificates: []tls.Certificate{*impersonationCert},
		},
	}
	// TODO: store the CA, cert, etc. on the authenticator status.
	go func() {
		if err := impersonationProxyServer.ListenAndServeTLS("", ""); err != nil {
			klog.ErrorS(err, "could not serve impersonation proxy")
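
The hunk above creates the load balancer Service unconditionally whenever a client can be built. One possible follow-up, sketched here purely for illustration and not part of this diff, would be to consult the new clusterhost package first; the sketch reuses the client, plog, and ctx identifiers from the hunk above.

		// Hypothetical wiring (not in this commit): one possible policy is to only
		// request an external load balancer when no control plane nodes are visible.
		hasControlPlane, err := clusterhost.New(client.Kubernetes).HasControlPlaneNodes(ctx)
		if err != nil {
			plog.WarningErr("could not check for control plane nodes", err)
		} else if !hasControlPlane {
			// create the impersonation-proxy-load-balancer Service shown above
		}
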
@@ -1,4 +1,4 @@
# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Describe the capabilities of the cluster against which the integration tests will run.
@@ -6,3 +6,6 @@ capabilities:

  # Is it possible to borrow the cluster's signing key from the kube API server?
  clusterSigningKeyIsAvailable: false

  # Will the cluster successfully provision a load balancer if requested?
  hasExternalLoadBalancerProvider: true

@@ -1,4 +1,4 @@
# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Describe the capabilities of the cluster against which the integration tests will run.
@@ -6,3 +6,6 @@ capabilities:

  # Is it possible to borrow the cluster's signing key from the kube API server?
  clusterSigningKeyIsAvailable: true

  # Will the cluster successfully provision a load balancer if requested?
  hasExternalLoadBalancerProvider: false

@@ -1,4 +1,4 @@
# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Describe the capabilities of the cluster against which the integration tests will run.
@@ -6,3 +6,6 @@ capabilities:

  # Is it possible to borrow the cluster's signing key from the kube API server?
  clusterSigningKeyIsAvailable: true

  # Will the cluster successfully provision a load balancer if requested?
  hasExternalLoadBalancerProvider: true

@@ -18,7 +18,8 @@ import (
type Capability string

const (
	ClusterSigningKeyIsAvailable    Capability = "clusterSigningKeyIsAvailable"
	HasExternalLoadBalancerProvider Capability = "hasExternalLoadBalancerProvider"
)

// TestEnv captures all the external parameters consumed by our integration tests.
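
Below is a hedged sketch of how an integration test might consume the new hasExternalLoadBalancerProvider capability. The fakeTestEnv type and loadTestEnv helper are invented for illustration and are not the project's real test helpers, which presumably parse the cluster capabilities YAML shown above.

package testenv_example // illustrative only

import "testing"

// fakeTestEnv is an assumed shape, for illustration; the real TestEnv may differ.
type fakeTestEnv struct {
	Capabilities map[string]bool
}

func loadTestEnv(t *testing.T) *fakeTestEnv {
	t.Helper()
	// In the real suite this would come from the cluster capabilities YAML file.
	return &fakeTestEnv{Capabilities: map[string]bool{"hasExternalLoadBalancerProvider": true}}
}

func TestSomethingThatNeedsALoadBalancer(t *testing.T) {
	env := loadTestEnv(t)
	if !env.Capabilities["hasExternalLoadBalancerProvider"] {
		t.Skip("cluster does not provision external load balancers")
	}
	// ... exercise behavior that requires a LoadBalancer Service ...
}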