// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package integration

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	loginv1alpha1 "go.pinniped.dev/generated/1.19/apis/login/v1alpha1"
	"go.pinniped.dev/internal/testutil"
	"go.pinniped.dev/test/library"
)

func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
	env := library.IntegrationEnv(t)
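
	// The serving cert Secret name is prefixed with the configurable app name (from
	// values.yaml), following Pinniped's convention that resource names start with a
	// prefix identifying the component that owns them.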
	defaultServingCertResourceName := env.ConciergeAppName + "-api-tls-serving-certificate"
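
	// Each test case forces rotation in a different way; both expect the same recovery:
	// a regenerated serving cert Secret, an updated APIService CA bundle, and a working
	// aggregated API endpoint.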
	tests := []struct {
		name          string
		forceRotation func(context.Context, kubernetes.Interface, string) error
	}{
		{
			name: "manual",
			forceRotation: func(
				ctx context.Context,
				kubeClient kubernetes.Interface,
				namespace string,
			) error {
				// Delete the Secret, simulating an end user doing `kubectl delete` to manually ask for an immediate rotation.
				return kubeClient.
					CoreV1().
					Secrets(namespace).
					Delete(ctx, defaultServingCertResourceName, metav1.DeleteOptions{})
			},
		},
		{
			name: "automatic",
			forceRotation: func(
				ctx context.Context,
				kubeClient kubernetes.Interface,
				namespace string,
			) error {
				// Create a cert that is expired - this should force the rotation controller
				// to delete the cert, and therefore the cert should get rotated.
				secret, err := kubeClient.
					CoreV1().
					Secrets(namespace).
					Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
				if err != nil {
					return err
				}

				secret.Data["tlsCertificateChain"], err = createExpiredCertificate()
				if err != nil {
					return err
				}

				_, err = kubeClient.
					CoreV1().
					Secrets(namespace).
					Update(ctx, secret, metav1.UpdateOptions{})
				return err
			},
		},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
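			// Separate clients are used for each API surface touched by the test: kubeClient
			// for core resources (the serving cert Secret), aggregatedClient for the
			// kube-aggregator's APIService objects, and pinnipedClient for Pinniped's own
			// aggregated login API.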
			kubeClient := library.NewClientset(t)
			aggregatedClient := library.NewAggregatedClientset(t)
			pinnipedClient := library.NewPinnipedClientset(t)
			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
			defer cancel()
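
			// The APIService name follows the Kubernetes "<version>.<group>" convention for
			// aggregated APIs, so unlike most other Pinniped resource names it cannot be
			// customized via the app name in values.yaml.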
			const apiServiceName = "v1alpha1.login.pinniped.dev"

			// Get the initial auto-generated version of the Secret.
			secret, err := kubeClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
			require.NoError(t, err)
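			// The Secret is expected to hold the CA bundle, serving key, and serving cert
			// chain under these data keys.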
			initialCACert := secret.Data["caCertificate"]
			initialPrivateKey := secret.Data["tlsPrivateKey"]
			initialCertChain := secret.Data["tlsCertificateChain"]
			require.NotEmpty(t, initialCACert)
			require.NotEmpty(t, initialPrivateKey)
			require.NotEmpty(t, initialCertChain)

			// Check that the APIService has the same CA.
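			// (The kube API server uses the APIService's CA bundle to validate the aggregated
			// API's serving certificate when proxying requests to it.)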
			apiService, err := aggregatedClient.ApiregistrationV1().APIServices().Get(ctx, apiServiceName, metav1.GetOptions{})
			require.NoError(t, err)
			require.Equal(t, initialCACert, apiService.Spec.CABundle)

			// Force rotation to happen.
			require.NoError(t, test.forceRotation(ctx, kubeClient, env.ConciergeNamespace))

			// Expect that the Secret comes back right away with newly minted certs.
			secretIsRegenerated := func() bool {
				secret, err = kubeClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
				return err == nil
			}
			assert.Eventually(t, secretIsRegenerated, 10*time.Second, 250*time.Millisecond)
			require.NoError(t, err) // prints out the error and stops the test in case of failure
			regeneratedCACert := secret.Data["caCertificate"]
			regeneratedPrivateKey := secret.Data["tlsPrivateKey"]
			regeneratedCertChain := secret.Data["tlsCertificateChain"]
			require.NotEmpty(t, regeneratedCACert)
			require.NotEmpty(t, regeneratedPrivateKey)
			require.NotEmpty(t, regeneratedCertChain)
			require.NotEqual(t, initialCACert, regeneratedCACert)
			require.NotEqual(t, initialPrivateKey, regeneratedPrivateKey)
			require.NotEqual(t, initialCertChain, regeneratedCertChain)

			// Expect that the APIService was also updated with the new CA.
			aggregatedAPIUpdated := func() bool {
				apiService, err = aggregatedClient.ApiregistrationV1().APIServices().Get(ctx, apiServiceName, metav1.GetOptions{})
				return err == nil
			}
			assert.Eventually(t, aggregatedAPIUpdated, 10*time.Second, 250*time.Millisecond)
			require.NoError(t, err) // prints out the error and stops the test in case of failure
			require.Equal(t, regeneratedCACert, apiService.Spec.CABundle)

			// Check that we can still make requests to the aggregated API through the kube API server,
			// because the kube API server uses these certs when proxying requests to the aggregated API server,
			// so this is effectively checking that the aggregated API server is using these new certs.
			// We ensure that 10 straight requests succeed so that we filter out false positives where a single
			// pod has rotated its cert, but the other pods sitting behind the service have not yet.
			aggregatedAPIWorking := func() bool {
				for i := 0; i < 10; i++ {
					_, err = pinnipedClient.LoginV1alpha1().TokenCredentialRequests(env.ConciergeNamespace).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
						TypeMeta:   metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{},
						Spec:       loginv1alpha1.TokenCredentialRequestSpec{Token: "not a good token"},
					}, metav1.CreateOptions{})
					if err != nil {
						break
					}
				}
				// Should have got a success response with an error message inside it complaining about the token value.
				return err == nil
			}

			// Unfortunately, although our code changes all the certs immediately, it seems to take ~1 minute for
			// the API machinery to notice that we updated our serving cert, causing 1 minute of downtime for our endpoint.
			assert.Eventually(t, aggregatedAPIWorking, 2*time.Minute, 250*time.Millisecond)
			require.NoError(t, err) // prints out the error and stops the test in case of failure
		})
	}
}
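
// createExpiredCertificate builds a certificate whose validity window is entirely in
// the past, so the rotation controller should consider it expired and replace it.
// This assumes the testutil.CreateCertificate helper takes notBefore and notAfter
// times and returns the encoded certificate bytes, as used below.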
func createExpiredCertificate() ([]byte, error) {
	return testutil.CreateCertificate(
		time.Now().Add(-24*time.Hour), // notBefore
		time.Now().Add(-time.Hour),    // notAfter
	)
}