Allow app to start despite failing to borrow the cluster signing key
- Controller and aggregated API server are allowed to run
- Keep retrying to borrow the cluster signing key, in case the initial failure to get it was transient
- The CredentialRequest endpoint will always return an authentication failure as long as the cluster signing key cannot be borrowed
- Update which integration tests are skipped to reflect what should and should not work, based on the cluster's capabilities, under this new behavior
- Move CreateOrUpdateCredentialIssuerConfig() and related methods to their own file
- Update the CredentialIssuerConfig's Status every time we try to refresh the cluster signing key
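Concretely, kubecertauthority.New() no longer returns an error; it takes success and failure callbacks and keeps retrying on each tick. The sketch below is not part of the diff: the import path and the wireClusterCA helper are assumptions based on the code shown later in this commit.

package main

import (
	"time"

	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"

	"github.com/suzerain-io/pinniped/internal/certauthority/kubecertauthority"
)

// wireClusterCA is a hypothetical helper showing the new constructor contract:
// New never fails; it reports the initial load and every later refresh through
// the two callbacks, and retries on each tick until the key can be borrowed.
func wireClusterCA(kubeConfig *restclient.Config, kubeClient kubernetes.Interface) (*kubecertauthority.CA, func()) {
	ticker := time.NewTicker(5 * time.Minute)
	ca, shutdownCA := kubecertauthority.New(
		kubeClient,
		kubecertauthority.NewPodCommandExecutor(kubeConfig, kubeClient),
		ticker.C,
		func() { /* success: publish a healthy strategy to CredentialIssuerConfig */ },
		func(err error) { /* failure: publish err; New retries on the next tick */ },
	)
	return ca, func() { shutdownCA(); ticker.Stop() }
}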
parent 4306599396
commit 80153f9a80

go.sum
@@ -106,6 +106,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
@@ -30,6 +30,7 @@ import (

// ErrNoKubeControllerManagerPod is returned when no kube-controller-manager pod is found on the cluster.
const ErrNoKubeControllerManagerPod = constable.Error("did not find kube-controller-manager pod")
+const ErrIncapableOfIssuingCertificates = constable.Error("this cluster is not currently capable of issuing certificates")

const k8sAPIServerCACertPEMDefaultPath = "/etc/kubernetes/ca/ca.pem"
const k8sAPIServerCAKeyPEMDefaultPath = "/etc/kubernetes/ca/ca.key"
@@ -86,31 +87,50 @@ type CA struct {

	shutdown, done chan struct{}

+	onSuccessfulRefresh SuccessCallback
+	onFailedRefresh     FailureCallback
+
	lock         sync.RWMutex
	activeSigner signer
}

type ShutdownFunc func()
+type SuccessCallback func()
+type FailureCallback func(error)

-// New creates a new instance of a CA which is has loaded the kube API server's private key
-// and is ready to issue certs, or an error. When successful, it also starts a goroutine
-// to periodically reload the kube API server's private key in case it changed, and returns
-// a function that can be used to shut down that goroutine.
-func New(kubeClient kubernetes.Interface, podCommandExecutor PodCommandExecutor, tick <-chan time.Time) (*CA, ShutdownFunc, error) {
+// New creates a new instance of a CA. It tries to load the kube API server's private key
+// immediately. If that succeeds then it calls the success callback and it is ready to issue certs.
+// When it fails to get the kube API server's private key, then it calls the failure callback and
+// it will try again on the next tick. It starts a goroutine to periodically reload the kube
+// API server's private key in case it failed previously or in case the key has changed. It returns
+// a function that can be used to shut down that goroutine. Future attempts made by that goroutine
+// to get the key will also result in success or failure callbacks.
+func New(
+	kubeClient kubernetes.Interface,
+	podCommandExecutor PodCommandExecutor,
+	tick <-chan time.Time,
+	onSuccessfulRefresh SuccessCallback,
+	onFailedRefresh FailureCallback,
+) (*CA, ShutdownFunc) {
	signer, err := createSignerWithAPIServerSecret(kubeClient, podCommandExecutor)
	if err != nil {
-		// The initial load failed, so give up
-		return nil, nil, err
+		klog.Errorf("could not initially fetch the API server's signing key: %s", err)
+		signer = nil
+		onFailedRefresh(err)
+	} else {
+		onSuccessfulRefresh()
	}
	result := &CA{
-		kubeClient:         kubeClient,
-		podCommandExecutor: podCommandExecutor,
-		activeSigner:       signer,
-		shutdown:           make(chan struct{}),
-		done:               make(chan struct{}),
+		kubeClient:          kubeClient,
+		podCommandExecutor:  podCommandExecutor,
+		shutdown:            make(chan struct{}),
+		done:                make(chan struct{}),
+		onSuccessfulRefresh: onSuccessfulRefresh,
+		onFailedRefresh:     onFailedRefresh,
+		activeSigner:        signer,
	}
	go result.refreshLoop(tick)
-	return result, result.shutdownRefresh, nil
+	return result, result.shutdownRefresh
}

func createSignerWithAPIServerSecret(kubeClient kubernetes.Interface, podCommandExecutor PodCommandExecutor) (signer, error) {
@@ -152,11 +172,13 @@ func (c *CA) updateSigner() {
	newSigner, err := createSignerWithAPIServerSecret(c.kubeClient, c.podCommandExecutor)
	if err != nil {
		klog.Errorf("could not create signer with API server secret: %s", err)
+		c.onFailedRefresh(err)
		return
	}
	c.lock.Lock()
	c.activeSigner = newSigner
	c.lock.Unlock()
+	c.onSuccessfulRefresh()
}

func (c *CA) shutdownRefresh() {
@@ -171,6 +193,10 @@ func (c *CA) IssuePEM(subject pkix.Name, dnsNames []string, ttl time.Duration) (
	signer := c.activeSigner
	c.lock.RUnlock()

+	if signer == nil {
+		return nil, nil, ErrIncapableOfIssuingCertificates
+	}
+
	return signer.IssuePEM(subject, dnsNames, ttl)
}
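Because constable.Error is a comparable string type and IssuePEM returns the sentinel value directly, callers can detect the not-yet-capable state with errors.Is. A small illustrative sketch (not part of the commit; the caller function and import path are assumptions):

package main

import (
	"crypto/x509/pkix"
	"errors"
	"time"

	"github.com/suzerain-io/pinniped/internal/certauthority/kubecertauthority"
)

// issueOrReportIncapable is a hypothetical external caller of the reworked CA.
func issueOrReportIncapable(ca *kubecertauthority.CA) ([]byte, []byte, error) {
	certPEM, keyPEM, err := ca.IssuePEM(
		pkix.Name{CommonName: "Test Server"},
		[]string{"example.com"},
		10*time.Minute,
	)
	if errors.Is(err, kubecertauthority.ErrIncapableOfIssuingCertificates) {
		// The signing key has not been borrowed (yet); treat this as a normal
		// "not capable" condition rather than a fatal server error.
		return nil, nil, err
	}
	return certPEM, keyPEM, err
}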
@@ -9,9 +9,9 @@ import (
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
-	"errors"
	"fmt"
	"io/ioutil"
+	"sync"
	"testing"
	"time"
@@ -53,6 +53,46 @@ func (s *fakePodExecutor) Exec(podNamespace string, podName string, commandAndAr
	return result, nil
}

+type callbackRecorder struct {
+	numberOfTimesSuccessCalled int
+	numberOfTimesFailureCalled int
+	failureErrors              []error
+	mutex                      sync.Mutex
+}
+
+func (c *callbackRecorder) OnSuccess() {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	c.numberOfTimesSuccessCalled++
+}
+
+func (c *callbackRecorder) OnFailure(err error) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	c.numberOfTimesFailureCalled++
+	c.failureErrors = append(c.failureErrors, err)
+}
+
+func (c *callbackRecorder) NumberOfTimesSuccessCalled() int {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	return c.numberOfTimesSuccessCalled
+}
+
+func (c *callbackRecorder) NumberOfTimesFailureCalled() int {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	return c.numberOfTimesFailureCalled
+}
+
+func (c *callbackRecorder) FailureErrors() []error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	var errs = make([]error, len(c.failureErrors))
+	copy(errs, c.failureErrors)
+	return errs
+}
+
func TestCA(t *testing.T) {
	spec.Run(t, "CA", func(t *testing.T, when spec.G, it spec.S) {
		var r *require.Assertions
@@ -62,9 +102,29 @@ func TestCA(t *testing.T) {
		var kubeAPIClient *kubernetesfake.Clientset
		var fakeExecutor *fakePodExecutor
		var neverTicker <-chan time.Time
+		var callbacks *callbackRecorder
		var logger *testutil.TranscriptLogger

+		var requireInitialFailureLogMessage = func(specificErrorMessage string) {
+			r.Len(logger.Transcript(), 1)
+			r.Equal(
+				fmt.Sprintf("could not initially fetch the API server's signing key: %s\n", specificErrorMessage),
+				logger.Transcript()[0].Message,
+			)
+			r.Equal(logger.Transcript()[0].Level, "error")
+		}
+
+		var requireNotCapableOfIssuingCerts = func(subject *CA) {
+			certPEM, keyPEM, err := subject.IssuePEM(
+				pkix.Name{CommonName: "Test Server"},
+				[]string{"example.com"},
+				10*time.Minute,
+			)
+			r.Nil(certPEM)
+			r.Nil(keyPEM)
+			r.EqualError(err, "this cluster is not currently capable of issuing certificates")
+		}
+
		it.Before(func() {
			r = require.New(t)
@@ -104,6 +164,8 @@ func TestCA(t *testing.T) {
			},
		}

+		callbacks = &callbackRecorder{}
+
		logger = testutil.NewTranscriptLogger(t)
		klog.SetLogger(logger) // this is unfortunately a global logger, so can't run these tests in parallel :(
	})
@@ -122,9 +184,7 @@ func TestCA(t *testing.T) {
		it("finds the API server's signing key and uses it to issue certificates", func() {
			fakeTicker := make(chan time.Time)

-			subject, shutdownFunc, err := New(kubeAPIClient, fakeExecutor, fakeTicker)
-			r.NoError(err)
-			r.NotNil(shutdownFunc)
+			subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, fakeTicker, callbacks.OnSuccess, callbacks.OnFailure)
			defer shutdownFunc()

			r.Equal(2, fakeExecutor.callCount)
@@ -137,6 +197,9 @@ func TestCA(t *testing.T) {
			r.Equal("fake-pod", fakeExecutor.calledWithPodName[1])
			r.Equal([]string{"cat", "/etc/kubernetes/ca/ca.key"}, fakeExecutor.calledWithCommandAndArgs[1])

+			r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
+			r.Equal(0, callbacks.NumberOfTimesFailureCalled())
+
			// Validate that we can issue a certificate signed by the original API server CA.
			certPEM, keyPEM, err := subject.IssuePEM(
				pkix.Name{CommonName: "Test Server"},
@@ -152,6 +215,10 @@ func TestCA(t *testing.T) {
			// Tick the timer and wait for another refresh loop to complete.
			fakeTicker <- time.Now()

+			r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
+			r.Equal(0, callbacks.NumberOfTimesFailureCalled())
+
			// Eventually it starts issuing certs using the new signing key.
			var secondCertPEM, secondKeyPEM string
			r.Eventually(func() bool {
				certPEM, keyPEM, err := subject.IssuePEM(
@@ -191,11 +258,11 @@ func TestCA(t *testing.T) {
		it("logs an error message", func() {
			fakeTicker := make(chan time.Time)

-			subject, shutdownFunc, err := New(kubeAPIClient, fakeExecutor, fakeTicker)
-			r.NoError(err)
-			r.NotNil(shutdownFunc)
+			subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, fakeTicker, callbacks.OnSuccess, callbacks.OnFailure)
			defer shutdownFunc()
			r.Equal(2, fakeExecutor.callCount)
+			r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
+			r.Equal(0, callbacks.NumberOfTimesFailureCalled())

			// Tick the timer and wait for another refresh loop to complete.
			fakeTicker <- time.Now()
@@ -205,6 +272,10 @@ func TestCA(t *testing.T) {
			r.Contains(logger.Transcript()[0].Message, "could not create signer with API server secret: some exec error")
			r.Equal(logger.Transcript()[0].Level, "error")

+			r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
+			r.Equal(1, callbacks.NumberOfTimesFailureCalled())
+			r.EqualError(callbacks.FailureErrors()[0], "some exec error")
+
			// Validate that we can still issue a certificate signed by the original API server CA.
			certPEM, _, err := subject.IssuePEM(
				pkix.Name{CommonName: "Test Server"},
@@ -216,16 +287,62 @@ func TestCA(t *testing.T) {
			})
		})

+		when("the exec commands fail the first time but subsequently returns the API server's keypair", func() {
+			it.Before(func() {
+				fakeExecutor.errorsToReturn = []error{fmt.Errorf("some exec error"), nil, nil}
+				fakeExecutor.resultsToReturn = []string{"", fakeCertPEM, fakeKeyPEM}
+			})
+
+			it("logs an error message and fails to issue certs until it can get the API server's keypair", func() {
+				fakeTicker := make(chan time.Time)
+
+				subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, fakeTicker, callbacks.OnSuccess, callbacks.OnFailure)
+				defer shutdownFunc()
+				r.Equal(1, fakeExecutor.callCount)
+				r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
+				r.Equal(1, callbacks.NumberOfTimesFailureCalled())
+				r.EqualError(callbacks.FailureErrors()[0], "some exec error")
+
+				requireInitialFailureLogMessage("some exec error")
+				requireNotCapableOfIssuingCerts(subject)
+
+				// Tick the timer and wait for another refresh loop to complete.
+				fakeTicker <- time.Now()
+
+				// Wait until it can start to issue certs, and then validate the issued cert.
+				var certPEM, keyPEM []byte
+				r.Eventually(func() bool {
+					var err error
+					certPEM, keyPEM, err = subject.IssuePEM(
+						pkix.Name{CommonName: "Test Server"},
+						[]string{"example.com"},
+						10*time.Minute,
+					)
+					return err == nil
+				}, 5*time.Second, 10*time.Millisecond)
+				validCert := testutil.ValidateCertificate(t, fakeCertPEM, string(certPEM))
+				validCert.RequireDNSName("example.com")
+				validCert.RequireLifetime(time.Now(), time.Now().Add(10*time.Minute), 2*time.Minute)
+				validCert.RequireMatchesPrivateKey(string(keyPEM))
+
+				r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
+				r.Equal(1, callbacks.NumberOfTimesFailureCalled())
+			})
+		})
+
		when("the exec commands succeed but return garbage", func() {
			it.Before(func() {
				fakeExecutor.resultsToReturn = []string{"not a cert", "not a private key"}
			})

-			it("returns an error", func() {
-				subject, shutdownFunc, err := New(kubeAPIClient, fakeExecutor, neverTicker)
-				r.Nil(subject)
-				r.Nil(shutdownFunc)
-				r.EqualError(err, "could not load CA: tls: failed to find any PEM data in certificate input")
+			it("returns a CA who cannot issue certs", func() {
+				subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
+				defer shutdownFunc()
+				requireInitialFailureLogMessage("could not load CA: tls: failed to find any PEM data in certificate input")
+				requireNotCapableOfIssuingCerts(subject)
+				r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
+				r.Equal(1, callbacks.NumberOfTimesFailureCalled())
+				r.EqualError(callbacks.FailureErrors()[0], "could not load CA: tls: failed to find any PEM data in certificate input")
			})
		})
@@ -234,11 +351,14 @@ func TestCA(t *testing.T) {
				fakeExecutor.errorsToReturn = []error{fmt.Errorf("some error"), nil}
			})

-			it("returns an error", func() {
-				subject, shutdownFunc, err := New(kubeAPIClient, fakeExecutor, neverTicker)
-				r.Nil(subject)
-				r.Nil(shutdownFunc)
-				r.EqualError(err, "some error")
+			it("returns a CA who cannot issue certs", func() {
+				subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
+				defer shutdownFunc()
+				requireInitialFailureLogMessage("some error")
+				requireNotCapableOfIssuingCerts(subject)
+				r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
+				r.Equal(1, callbacks.NumberOfTimesFailureCalled())
+				r.EqualError(callbacks.FailureErrors()[0], "some error")
			})
		})
@@ -247,11 +367,14 @@ func TestCA(t *testing.T) {
				fakeExecutor.errorsToReturn = []error{nil, fmt.Errorf("some error")}
			})

-			it("returns an error", func() {
-				subject, shutdownFunc, err := New(kubeAPIClient, fakeExecutor, neverTicker)
-				r.Nil(subject)
-				r.Nil(shutdownFunc)
-				r.EqualError(err, "some error")
+			it("returns a CA who cannot issue certs", func() {
+				subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
+				defer shutdownFunc()
+				requireInitialFailureLogMessage("some error")
+				requireNotCapableOfIssuingCerts(subject)
+				r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
+				r.Equal(1, callbacks.NumberOfTimesFailureCalled())
+				r.EqualError(callbacks.FailureErrors()[0], "some error")
			})
		})
	})
@@ -270,9 +393,7 @@ func TestCA(t *testing.T) {
		})

		it("finds the API server's signing key and uses it to issue certificates", func() {
-			_, shutdownFunc, err := New(kubeAPIClient, fakeExecutor, neverTicker)
-			r.NoError(err)
-			r.NotNil(shutdownFunc)
+			_, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
			defer shutdownFunc()

			r.Equal(2, fakeExecutor.callCount)
@@ -300,9 +421,7 @@ func TestCA(t *testing.T) {
		})

		it("finds the API server's signing key and uses it to issue certificates", func() {
-			_, shutdownFunc, err := New(kubeAPIClient, fakeExecutor, neverTicker)
-			r.NoError(err)
-			r.NotNil(shutdownFunc)
+			_, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
			defer shutdownFunc()

			r.Equal(2, fakeExecutor.callCount)
@@ -319,11 +438,14 @@ func TestCA(t *testing.T) {

		when("the kube-controller-manager pod is not found", func() {
			it("returns an error", func() {
-				subject, shutdownFunc, err := New(kubeAPIClient, fakeExecutor, neverTicker)
-				r.Nil(subject)
-				r.Nil(shutdownFunc)
-				r.True(errors.Is(err, ErrNoKubeControllerManagerPod))
+				subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
+				defer shutdownFunc()
+				requireInitialFailureLogMessage("did not find kube-controller-manager pod")
+				requireNotCapableOfIssuingCerts(subject)
+				r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
+				r.Equal(1, callbacks.NumberOfTimesFailureCalled())
+				r.EqualError(callbacks.FailureErrors()[0], "did not find kube-controller-manager pod")
			})
		})
-	}, spec.Report(report.Terminal{}))
+	}, spec.Sequential(), spec.Report(report.Terminal{}))
}
@@ -0,0 +1,100 @@
+/*
+Copyright 2020 VMware, Inc.
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package issuerconfig
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	crdpinnipedv1alpha1 "github.com/suzerain-io/pinniped/kubernetes/1.19/api/apis/crdpinniped/v1alpha1"
+	pinnipedclientset "github.com/suzerain-io/pinniped/kubernetes/1.19/client-go/clientset/versioned"
+)
+
+func CreateOrUpdateCredentialIssuerConfig(
+	ctx context.Context,
+	credentialIssuerConfigNamespace string,
+	pinnipedClient pinnipedclientset.Interface,
+	applyUpdatesToCredentialIssuerConfigFunc func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig),
+) error {
+	credentialIssuerConfigName := configName
+
+	existingCredentialIssuerConfig, err := pinnipedClient.
+		CrdV1alpha1().
+		CredentialIssuerConfigs(credentialIssuerConfigNamespace).
+		Get(ctx, credentialIssuerConfigName, metav1.GetOptions{})
+
+	notFound := k8serrors.IsNotFound(err)
+	if err != nil && !notFound {
+		return fmt.Errorf("could not get credentialissuerconfig: %w", err)
+	}
+
+	return createOrUpdateCredentialIssuerConfig(
+		ctx,
+		existingCredentialIssuerConfig,
+		notFound,
+		credentialIssuerConfigName,
+		credentialIssuerConfigNamespace,
+		pinnipedClient,
+		applyUpdatesToCredentialIssuerConfigFunc)
+}
+
+func createOrUpdateCredentialIssuerConfig(
+	ctx context.Context,
+	existingCredentialIssuerConfig *crdpinnipedv1alpha1.CredentialIssuerConfig,
+	notFound bool,
+	credentialIssuerConfigName string,
+	credentialIssuerConfigNamespace string,
+	pinnipedClient pinnipedclientset.Interface,
+	applyUpdatesToCredentialIssuerConfigFunc func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig),
+) error {
+	credentialIssuerConfigsClient := pinnipedClient.CrdV1alpha1().CredentialIssuerConfigs(credentialIssuerConfigNamespace)
+
+	if notFound {
+		// Create it
+		credentialIssuerConfig := minimalValidCredentialIssuerConfig(credentialIssuerConfigName, credentialIssuerConfigNamespace)
+		applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
+
+		if _, err := credentialIssuerConfigsClient.Create(ctx, credentialIssuerConfig, metav1.CreateOptions{}); err != nil {
+			return fmt.Errorf("could not create credentialissuerconfig: %w", err)
+		}
+	} else {
+		// Already exists, so check to see if we need to update it
+		credentialIssuerConfig := existingCredentialIssuerConfig.DeepCopy()
+		applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
+
+		if reflect.DeepEqual(existingCredentialIssuerConfig.Status, credentialIssuerConfig.Status) {
+			// Nothing interesting would change as a result of this update, so skip it
+			return nil
+		}
+
+		if _, err := credentialIssuerConfigsClient.Update(ctx, credentialIssuerConfig, metav1.UpdateOptions{}); err != nil {
+			return fmt.Errorf("could not update credentialissuerconfig: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func minimalValidCredentialIssuerConfig(
+	credentialIssuerConfigName string,
+	credentialIssuerConfigNamespace string,
+) *crdpinnipedv1alpha1.CredentialIssuerConfig {
+	return &crdpinnipedv1alpha1.CredentialIssuerConfig{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      credentialIssuerConfigName,
+			Namespace: credentialIssuerConfigNamespace,
+		},
+		Status: crdpinnipedv1alpha1.CredentialIssuerConfigStatus{
+			Strategies:     []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{},
+			KubeConfigInfo: nil,
+		},
+	}
+}
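For illustration, a hypothetical controller snippet (not in this commit; the recordFetchError name and the issuerconfig import path are assumptions) that uses the moved helper to write an error strategy on every failed refresh:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"

	"github.com/suzerain-io/pinniped/internal/controller/issuerconfig"
	crdpinnipedv1alpha1 "github.com/suzerain-io/pinniped/kubernetes/1.19/api/apis/crdpinniped/v1alpha1"
	pinnipedclientset "github.com/suzerain-io/pinniped/kubernetes/1.19/client-go/clientset/versioned"
)

// recordFetchError writes an error strategy into the CredentialIssuerConfig's
// Status, creating the object if it does not exist yet.
func recordFetchError(ctx context.Context, namespace string, client pinnipedclientset.Interface, fetchErr error) {
	err := issuerconfig.CreateOrUpdateCredentialIssuerConfig(ctx, namespace, client,
		func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig) {
			configToUpdate.Status.Strategies = []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{
				{
					Type:           crdpinnipedv1alpha1.KubeClusterSigningCertificateStrategyType,
					Status:         crdpinnipedv1alpha1.ErrorStrategyStatus,
					Reason:         crdpinnipedv1alpha1.CouldNotFetchKeyStrategyReason,
					Message:        fetchErr.Error(),
					LastUpdateTime: metav1.Now(),
				},
			}
		},
	)
	if err != nil {
		klog.Errorf("could not create or update CredentialIssuerConfig: %s", err.Error())
	}
}

Note that the helper skips the Update call when the apply function leaves Status deeply equal; because LastUpdateTime is set to metav1.Now() here, each attempt produces a fresh write, which matches the commit's goal of updating Status on every refresh attempt.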
@@ -6,13 +6,10 @@ SPDX-License-Identifier: Apache-2.0
package issuerconfig

import (
-	"context"
	"encoding/base64"
	"fmt"
-	"reflect"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"
@@ -135,85 +132,3 @@ func (c *publisherController) Sync(ctx controller.Context) error {
		updateServerAndCAFunc)
	return err
}
-
-func CreateOrUpdateCredentialIssuerConfig(
-	ctx context.Context,
-	credentialIssuerConfigNamespace string,
-	pinnipedClient pinnipedclientset.Interface,
-	applyUpdatesToCredentialIssuerConfigFunc func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig),
-) error {
-	credentialIssuerConfigName := configName
-
-	existingCredentialIssuerConfig, err := pinnipedClient.
-		CrdV1alpha1().
-		CredentialIssuerConfigs(credentialIssuerConfigNamespace).
-		Get(ctx, credentialIssuerConfigName, metav1.GetOptions{})
-
-	notFound := k8serrors.IsNotFound(err)
-	if err != nil && !notFound {
-		return fmt.Errorf("could not get credentialissuerconfig: %w", err)
-	}
-
-	return createOrUpdateCredentialIssuerConfig(
-		ctx,
-		existingCredentialIssuerConfig,
-		notFound,
-		credentialIssuerConfigName,
-		credentialIssuerConfigNamespace,
-		pinnipedClient,
-		applyUpdatesToCredentialIssuerConfigFunc)
-}
-
-func createOrUpdateCredentialIssuerConfig(
-	ctx context.Context,
-	existingCredentialIssuerConfig *crdpinnipedv1alpha1.CredentialIssuerConfig,
-	notFound bool,
-	credentialIssuerConfigName string,
-	credentialIssuerConfigNamespace string,
-	pinnipedClient pinnipedclientset.Interface,
-	applyUpdatesToCredentialIssuerConfigFunc func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig),
-) error {
-	credentialIssuerConfigsClient := pinnipedClient.CrdV1alpha1().CredentialIssuerConfigs(credentialIssuerConfigNamespace)
-
-	if notFound {
-		// Create it
-		credentialIssuerConfig := minimalValidCredentialIssuerConfig(credentialIssuerConfigName, credentialIssuerConfigNamespace)
-		applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
-
-		if _, err := credentialIssuerConfigsClient.Create(ctx, credentialIssuerConfig, metav1.CreateOptions{}); err != nil {
-			return fmt.Errorf("could not create credentialissuerconfig: %w", err)
-		}
-	} else {
-		// Already exists, so check to see if we need to update it
-		credentialIssuerConfig := existingCredentialIssuerConfig.DeepCopy()
-		applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
-
-		if reflect.DeepEqual(existingCredentialIssuerConfig.Status, credentialIssuerConfig.Status) {
-			// Nothing interesting would change as a result of this update, so skip it
-			return nil
-		}
-
-		if _, err := credentialIssuerConfigsClient.Update(ctx, credentialIssuerConfig, metav1.UpdateOptions{}); err != nil {
-			return fmt.Errorf("could not update credentialissuerconfig: %w", err)
-		}
-	}
-
-	return nil
-}
-
-func minimalValidCredentialIssuerConfig(
-	credentialIssuerConfigName string,
-	credentialIssuerConfigNamespace string,
-) *crdpinnipedv1alpha1.CredentialIssuerConfig {
-	return &crdpinnipedv1alpha1.CredentialIssuerConfig{
-		TypeMeta: metav1.TypeMeta{},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      credentialIssuerConfigName,
-			Namespace: credentialIssuerConfigNamespace,
-		},
-		Status: crdpinnipedv1alpha1.CredentialIssuerConfigStatus{
-			Strategies:     []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{},
-			KubeConfigInfo: nil,
-		},
-	}
-}
@@ -165,7 +165,7 @@ func (a *App) runServer(ctx context.Context) error {
	return server.GenericAPIServer.PrepareRun().Run(ctx.Done())
}

-func getClusterCASigner(ctx context.Context, serverInstallationNamespace string) (*kubecertauthority.CA, kubecertauthority.ShutdownFunc, error) {
+func getClusterCASigner(ctx context.Context, serverInstallationNamespace string) (credentialrequest.CertIssuer, kubecertauthority.ShutdownFunc, error) {
	// Load the Kubernetes client configuration.
	kubeConfig, err := restclient.InClusterConfig()
	if err != nil {
@@ -188,57 +188,52 @@ func getClusterCASigner(ctx context.Context, serverInstallationNamespace string)
	ticker := time.NewTicker(5 * time.Minute)

	// Make a CA which uses the Kubernetes cluster API server's signing certs.
-	k8sClusterCA, shutdownCA, err := kubecertauthority.New(
+	k8sClusterCA, shutdownCA := kubecertauthority.New(
		kubeClient,
		kubecertauthority.NewPodCommandExecutor(kubeConfig, kubeClient),
		ticker.C,
-	)
-
-	if err != nil {
-		ticker.Stop()
-
-		if updateErr := issuerconfig.CreateOrUpdateCredentialIssuerConfig(
-			ctx,
-			serverInstallationNamespace,
-			pinnipedClient,
-			func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig) {
-				configToUpdate.Status.Strategies = []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{
-					{
-						Type:           crdpinnipedv1alpha1.KubeClusterSigningCertificateStrategyType,
-						Status:         crdpinnipedv1alpha1.ErrorStrategyStatus,
-						Reason:         crdpinnipedv1alpha1.CouldNotFetchKeyStrategyReason,
-						Message:        err.Error(),
-						LastUpdateTime: metav1.Now(),
-					},
-				}
-			},
-		); updateErr != nil {
-			klog.Errorf("error performing create or update on CredentialIssuerConfig to add strategy error: %s", updateErr.Error())
-		}
-
-		return nil, nil, fmt.Errorf("could not load cluster signing CA: %w", err)
-	}
-
-	updateErr := issuerconfig.CreateOrUpdateCredentialIssuerConfig(
-		ctx,
-		serverInstallationNamespace,
-		pinnipedClient,
-		func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig) {
-			configToUpdate.Status.Strategies = []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{
-				{
-					Type:           crdpinnipedv1alpha1.KubeClusterSigningCertificateStrategyType,
-					Status:         crdpinnipedv1alpha1.SuccessStrategyStatus,
-					Reason:         crdpinnipedv1alpha1.FetchedKeyStrategyReason,
-					Message:        "Key was fetched successfully",
-					LastUpdateTime: metav1.Now(),
-				},
-			}
-		},
-	)
-	if updateErr != nil {
-		//nolint:goerr113
-		return nil, nil, fmt.Errorf("error performing create or update on CredentialIssuerConfig to add strategy success: %w", updateErr)
-	}
+		func() { // success callback
+			err = issuerconfig.CreateOrUpdateCredentialIssuerConfig(
+				ctx,
+				serverInstallationNamespace,
+				pinnipedClient,
+				func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig) {
+					configToUpdate.Status.Strategies = []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{
+						{
+							Type:           crdpinnipedv1alpha1.KubeClusterSigningCertificateStrategyType,
+							Status:         crdpinnipedv1alpha1.SuccessStrategyStatus,
+							Reason:         crdpinnipedv1alpha1.FetchedKeyStrategyReason,
+							Message:        "Key was fetched successfully",
+							LastUpdateTime: metav1.Now(),
+						},
+					}
+				},
+			)
+			if err != nil {
+				klog.Errorf("error performing create or update on CredentialIssuerConfig to add strategy success: %s", err.Error())
+			}
+		},
+		func(err error) { // error callback
+			if updateErr := issuerconfig.CreateOrUpdateCredentialIssuerConfig(
+				ctx,
+				serverInstallationNamespace,
+				pinnipedClient,
+				func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig) {
+					configToUpdate.Status.Strategies = []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{
+						{
+							Type:           crdpinnipedv1alpha1.KubeClusterSigningCertificateStrategyType,
+							Status:         crdpinnipedv1alpha1.ErrorStrategyStatus,
+							Reason:         crdpinnipedv1alpha1.CouldNotFetchKeyStrategyReason,
+							Message:        err.Error(),
+							LastUpdateTime: metav1.Now(),
+						},
+					}
+				},
+			); updateErr != nil {
+				klog.Errorf("error performing create or update on CredentialIssuerConfig to add strategy error: %s", updateErr.Error())
+			}
+		},
+	)

	return k8sClusterCA, func() { shutdownCA(); ticker.Stop() }, nil
}
@@ -47,12 +47,12 @@ func (log *TranscriptLogger) Info(msg string, keysAndValues ...interface{}) {
	})
}

-func (log *TranscriptLogger) Error(err error, msg string, keysAndValues ...interface{}) {
+func (log *TranscriptLogger) Error(_ error, msg string, _ ...interface{}) {
	log.lock.Lock()
	defer log.lock.Unlock()
	log.transcript = append(log.transcript, TranscriptLogMessage{
		Level:   "error",
-		Message: fmt.Sprintf("%s: %v -- %v", msg, err, keysAndValues),
+		Message: msg,
	})
}
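The simplified Error signature works because, once klog.SetLogger is installed, klog hands the logger an already-formatted message (with a trailing newline), which is exactly what the CA tests above assert against. A rough sketch of that flow, assuming the internal/testutil import path (not part of the commit):

package testutil_test

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
	"k8s.io/klog/v2"

	"github.com/suzerain-io/pinniped/internal/testutil"
)

// Sketch: klog delivers the formatted message (plus "\n") to logger.Error,
// which records it verbatim in the transcript.
func TestTranscriptLoggerSketch(t *testing.T) {
	logger := testutil.NewTranscriptLogger(t)
	klog.SetLogger(logger) // klog's logger is global, so no parallel tests

	klog.Errorf("could not load CA: %s", fmt.Errorf("some error"))

	require.Equal(t, "error", logger.Transcript()[0].Level)
	require.Equal(t, "could not load CA: some error\n", logger.Transcript()[0].Message)
}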
@@ -4,7 +4,6 @@ go 1.14

require (
	github.com/davecgh/go-spew v1.1.1
-	github.com/ghodss/yaml v1.0.0
	github.com/stretchr/testify v1.6.1
	github.com/suzerain-io/pinniped v0.0.0-20200819182107-1b9a70d089f4
	github.com/suzerain-io/pinniped/kubernetes/1.19/api v0.0.0-00010101000000-000000000000
@@ -14,6 +13,7 @@ require (
	k8s.io/apimachinery v0.19.0-rc.0
	k8s.io/client-go v0.19.0-rc.0
	k8s.io/kube-aggregator v0.19.0-rc.0
+	sigs.k8s.io/yaml v1.2.0
)

replace (
@@ -16,7 +16,6 @@ import (

func TestGetAPIResourceList(t *testing.T) {
	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)

	client := library.NewPinnipedClientset(t)
@@ -22,7 +22,6 @@ import (

func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)

	tests := []struct {
		name string
@@ -20,7 +20,6 @@ import (

func TestGetDeployment(t *testing.T) {
	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
	namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
	deploymentName := library.GetEnv(t, "PINNIPED_APP_NAME")
@@ -74,24 +74,25 @@ func TestCredentialIssuerConfig(t *testing.T) {

	// Mutate the existing object. Don't delete it because that would mess up its `Status.Strategies` array,
	// since the reconciling controller is not currently responsible for that field.
-	existingConfig.Status.KubeConfigInfo.Server = "https://junk"
+	updatedServerValue := "https://junk"
+	existingConfig.Status.KubeConfigInfo.Server = updatedServerValue
	updatedConfig, err := client.
		CrdV1alpha1().
		CredentialIssuerConfigs(namespaceName).
		Update(ctx, existingConfig, metav1.UpdateOptions{})
	require.NoError(t, err)
-	require.Equal(t, "https://junk", updatedConfig.Status.KubeConfigInfo.Server)
+	require.Equal(t, updatedServerValue, updatedConfig.Status.KubeConfigInfo.Server)

-	// Expect that the object's mutated field is set back to what matches its source of truth.
+	// Expect that the object's mutated field is set back to what matches its source of truth by the controller.
	var actualCredentialIssuerConfig *crdpinnipedv1alpha1.CredentialIssuerConfig
-	var getConfig = func() bool {
+	var configChangesServerField = func() bool {
		actualCredentialIssuerConfig, err = client.
			CrdV1alpha1().
			CredentialIssuerConfigs(namespaceName).
			Get(ctx, "pinniped-config", metav1.GetOptions{})
-		return err == nil
+		return err == nil && actualCredentialIssuerConfig.Status.KubeConfigInfo.Server != updatedServerValue
	}
-	assert.Eventually(t, getConfig, 5*time.Second, 100*time.Millisecond)
+	assert.Eventually(t, configChangesServerField, 10*time.Second, 100*time.Millisecond)
	require.NoError(t, err) // prints out the error and stops the test in case of failure
	actualStatusKubeConfigInfo := actualCredentialIssuerConfig.Status.KubeConfigInfo
	require.Equal(t, expectedStatusKubeConfigInfo(config), actualStatusKubeConfigInfo)
@@ -25,51 +25,11 @@ import (
	"github.com/suzerain-io/pinniped/test/library"
)

-func makeRequest(t *testing.T, spec v1alpha1.CredentialRequestSpec) (*v1alpha1.CredentialRequest, error) {
-	t.Helper()
-
-	client := library.NewAnonymousPinnipedClientset(t)
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	return client.PinnipedV1alpha1().CredentialRequests().Create(ctx, &v1alpha1.CredentialRequest{
-		TypeMeta:   metav1.TypeMeta{},
-		ObjectMeta: metav1.ObjectMeta{},
-		Spec:       spec,
-	}, metav1.CreateOptions{})
-}
-
-func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
-	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
-	if err != nil {
-		// "404 not found" errors are acceptable, but others would be unexpected
-		statusError, isStatus := err.(*errors.StatusError)
-		require.True(t, isStatus)
-		require.Equal(t, http.StatusNotFound, int(statusError.Status().Code))
-
-		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
-		require.NoError(t, err)
-	}
-
-	t.Cleanup(func() {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		defer cancel()
-		err = adminClient.RbacV1().ClusterRoleBindings().Delete(ctx, binding.Name, metav1.DeleteOptions{})
-		require.NoError(t, err, "Test failed to clean up after itself")
-	})
-}
-
func TestSuccessfulCredentialRequest(t *testing.T) {
	library.SkipUnlessIntegration(t)
	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
-	tmcClusterToken := library.GetEnv(t, "PINNIPED_TMC_CLUSTER_TOKEN")

-	response, err := makeRequest(t, v1alpha1.CredentialRequestSpec{
-		Type:  v1alpha1.TokenCredentialType,
-		Token: &v1alpha1.CredentialRequestTokenCredential{Value: tmcClusterToken},
-	})
+	response, err := makeRequest(t, validCredentialRequestSpecWithRealToken(t))
	require.NoError(t, err)

	// Note: If this assertion fails then your TMC token might have expired. Get a fresh one and try again.
@@ -194,6 +154,63 @@ func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken(t *testing.T
	require.Nil(t, response.Status.Credential)
}

+func TestCredentialRequest_OtherwiseValidRequestWithRealTokenShouldFailWhenTheClusterIsNotCapable(t *testing.T) {
+	library.SkipUnlessIntegration(t)
+	library.SkipWhenClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
+
+	response, err := makeRequest(t, validCredentialRequestSpecWithRealToken(t))
+
+	require.NoError(t, err)
+
+	require.Empty(t, response.Spec)
+	require.Nil(t, response.Status.Credential)
+	require.Equal(t, stringPtr("authentication failed"), response.Status.Message)
+}
+
+func makeRequest(t *testing.T, spec v1alpha1.CredentialRequestSpec) (*v1alpha1.CredentialRequest, error) {
+	t.Helper()
+
+	client := library.NewAnonymousPinnipedClientset(t)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	return client.PinnipedV1alpha1().CredentialRequests().Create(ctx, &v1alpha1.CredentialRequest{
+		TypeMeta:   metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{},
+		Spec:       spec,
+	}, metav1.CreateOptions{})
+}
+
+func validCredentialRequestSpecWithRealToken(t *testing.T) v1alpha1.CredentialRequestSpec {
+	tmcClusterToken := library.GetEnv(t, "PINNIPED_TMC_CLUSTER_TOKEN")
+
+	return v1alpha1.CredentialRequestSpec{
+		Type:  v1alpha1.TokenCredentialType,
+		Token: &v1alpha1.CredentialRequestTokenCredential{Value: tmcClusterToken},
+	}
+}
+
+func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
+	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
+	if err != nil {
+		// "404 not found" errors are acceptable, but others would be unexpected
+		statusError, isStatus := err.(*errors.StatusError)
+		require.True(t, isStatus)
+		require.Equal(t, http.StatusNotFound, int(statusError.Status().Code))
+
+		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
+		require.NoError(t, err)
+	}
+
+	t.Cleanup(func() {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		err = adminClient.RbacV1().ClusterRoleBindings().Delete(ctx, binding.Name, metav1.DeleteOptions{})
+		require.NoError(t, err, "Test failed to clean up after itself")
+	})
+}
+
+func stringPtr(s string) *string {
+	return &s
+}
@@ -10,8 +10,8 @@ import (
	"os"
	"testing"

-	"github.com/ghodss/yaml"
	"github.com/stretchr/testify/require"
+	"sigs.k8s.io/yaml"
)

type TestClusterCapability string
@@ -56,3 +56,10 @@ func SkipUnlessClusterHasCapability(t *testing.T, capability TestClusterCapabili
		t.Skipf(`skipping integration test because cluster lacks the "%s" capability`, capability)
	}
}
+
+func SkipWhenClusterHasCapability(t *testing.T, capability TestClusterCapability) {
+	t.Helper()
+	if ClusterHasCapability(t, capability) {
+		t.Skipf(`skipping integration test because cluster has the "%s" capability`, capability)
+	}
+}
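Together with the existing SkipUnlessClusterHasCapability, this lets a behavior be covered by complementary tests, as the CredentialRequest tests in this commit do. An illustrative sketch (not from the diff; test names are hypothetical):

package integration

import (
	"testing"

	"github.com/suzerain-io/pinniped/test/library"
)

// Runs only on clusters that CAN borrow the signing key.
func TestWorksWhenClusterIsCapable(t *testing.T) {
	library.SkipUnlessIntegration(t)
	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
	// ...assert the happy path, e.g. a credential is issued...
}

// Runs only on clusters that CANNOT, and asserts the graceful failure.
func TestFailsGracefullyWhenClusterIsNotCapable(t *testing.T) {
	library.SkipUnlessIntegration(t)
	library.SkipWhenClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
	// ...expect the endpoint to report "authentication failed"...
}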