Merge pull request #122 from ankeesler/1-19-exec-strategy

Fix exec strategy for Kubernetes 1.19
Ryan Richard 2020-09-24 10:32:43 -07:00 committed by GitHub
commit 409d10baf8
48 changed files with 4231 additions and 1161 deletions

View File

@ -37,7 +37,7 @@ import (
"go.pinniped.dev/internal/constable"
"go.pinniped.dev/internal/controller/apicerts"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/provider"
"go.pinniped.dev/internal/dynamiccert"
)
const (
@ -53,12 +53,12 @@ const (
)
type webhook struct {
certProvider provider.DynamicTLSServingCertProvider
certProvider dynamiccert.Provider
secretInformer corev1informers.SecretInformer
}
func newWebhook(
certProvider provider.DynamicTLSServingCertProvider,
certProvider dynamiccert.Provider,
secretInformer corev1informers.SecretInformer,
) *webhook {
return &webhook{
@ -295,7 +295,7 @@ func newK8sClient() (kubernetes.Interface, error) {
func startControllers(
ctx context.Context,
dynamicCertProvider provider.DynamicTLSServingCertProvider,
dynamicCertProvider dynamiccert.Provider,
kubeClient kubernetes.Interface,
kubeInformers kubeinformers.SharedInformerFactory,
) {
@ -339,7 +339,7 @@ func startControllers(
func startWebhook(
ctx context.Context,
l net.Listener,
dynamicCertProvider provider.DynamicTLSServingCertProvider,
dynamicCertProvider dynamiccert.Provider,
secretInformer corev1informers.SecretInformer,
) error {
return newWebhook(dynamicCertProvider, secretInformer).start(ctx, l)
@ -366,7 +366,7 @@ func run() error {
kubeinformers.WithNamespace(namespace),
)
dynamicCertProvider := provider.NewDynamicTLSServingCertProvider()
dynamicCertProvider := dynamiccert.New()
startControllers(ctx, dynamicCertProvider, kubeClient, kubeInformers)
klog.InfoS("controllers are ready")

View File

@ -33,7 +33,7 @@ import (
kubernetesfake "k8s.io/client-go/kubernetes/fake"
"go.pinniped.dev/internal/certauthority"
"go.pinniped.dev/internal/provider"
"go.pinniped.dev/internal/dynamiccert"
)
func TestWebhook(t *testing.T) {
@ -459,10 +459,10 @@ func createSecretInformer(t *testing.T, kubeClient kubernetes.Interface) corev1i
return secretInformer
}
// newClientProvider returns a provider.DynamicTLSServingCertProvider configured
// newCertProvider returns a dynamiccert.Provider configured
// with valid serving cert, the CA bundle that can be used to verify the serving
// cert, and the server name that can be used to verify the TLS peer.
func newCertProvider(t *testing.T) (provider.DynamicTLSServingCertProvider, []byte, string) {
func newCertProvider(t *testing.T) (dynamiccert.Provider, []byte, string) {
t.Helper()
serverName := "local-user-authenticator"
@ -476,7 +476,7 @@ func newCertProvider(t *testing.T) (provider.DynamicTLSServingCertProvider, []by
certPEM, keyPEM, err := certauthority.ToPEM(cert)
require.NoError(t, err)
certProvider := provider.NewDynamicTLSServingCertProvider()
certProvider := dynamiccert.New()
certProvider.Set(certPEM, keyPEM)
return certProvider, ca.Bundle(), serverName

View File

@ -38,6 +38,13 @@ data:
servingCertificateSecret: (@= data.values.app_name + "-api-tls-serving-certificate" @)
credentialIssuerConfig: (@= data.values.app_name + "-config" @)
apiService: (@= data.values.app_name + "-api" @)
kubeCertAgent:
namePrefix: (@= data.values.app_name + "-kube-cert-agent-" @)
(@ if data.values.image_digest: @)
image: (@= data.values.image_repo + "@" + data.values.image_digest @)
(@ else: @)
image: (@= data.values.image_repo + ":" + data.values.image_tag @)
(@ end @)
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1

View File

@ -47,6 +47,14 @@ rules:
- apiGroups: [""]
resources: [secrets]
verbs: [create, get, list, patch, update, watch, delete]
#! We need to be able to CRUD pods in our namespace so we can reconcile the kube-cert-agent pods.
- apiGroups: [""]
resources: [pods]
verbs: [create, get, list, patch, update, watch, delete]
#! We need to be able to exec into pods in our namespace so we can grab the API server's private key
- apiGroups: [""]
resources: [pods/exec]
verbs: [create]
- apiGroups: [config.pinniped.dev, idp.pinniped.dev]
resources: ["*"]
verbs: [create, get, list, update, watch]
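The pods and pods/exec rules added above let the server create its kube-cert-agent pods in its own namespace and then run cat inside them to read the controller manager's signing keypair. As a rough sketch of the exec call those verbs permit, adapted from the kubeClientPodCommandExecutor that this PR deletes from the kubecertauthority package (the namespace and pod name below are hypothetical placeholders, not values from this commit):

// Sketch only: mirrors the PodCommandExecutor removed later in this diff.
package example

import (
    "bytes"

    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/deprecated/scheme"
    "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/remotecommand"
)

func catSigningKey(kubeConfig *restclient.Config, kubeClient kubernetes.Interface) (string, error) {
    // POST to the pod's "exec" subresource; this is the request that needs the pods/exec create verb.
    request := kubeClient.CoreV1().RESTClient().
        Post().
        Namespace("pinniped").                    // hypothetical install namespace
        Resource("pods").
        Name("pinniped-kube-cert-agent-0123456"). // hypothetical agent pod name
        SubResource("exec").
        VersionedParams(&v1.PodExecOptions{
            Stdout:  true,
            Command: []string{"cat", "/etc/kubernetes/ca/ca.key"},
        }, scheme.ParameterCodec)

    executor, err := remotecommand.NewSPDYExecutor(kubeConfig, "POST", request.URL())
    if err != nil {
        return "", err
    }
    var stdout bytes.Buffer
    if err := executor.Stream(remotecommand.StreamOptions{Stdout: &stdout}); err != nil {
        return "", err
    }
    return stdout.String(), nil
}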
@ -65,25 +73,22 @@ roleRef:
name: #@ data.values.app_name + "-aggregated-api-server"
apiGroup: rbac.authorization.k8s.io
#! Give permission to list pods and pod exec in the kube-system namespace so we can find the API server's private key
#! Give permission to read pods in the kube-system namespace so we can find the API server's private key
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: #@ data.values.app_name + "-kube-system-pod-exec"
name: #@ data.values.app_name + "-kube-system-pod-read"
namespace: kube-system
rules:
- apiGroups: [""]
resources: [pods]
verbs: [get, list]
- apiGroups: [""]
resources: [pods/exec]
verbs: [create]
verbs: [get, list, watch]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: #@ data.values.app_name + "-kube-system-pod-exec"
name: #@ data.values.app_name + "-kube-system-pod-read"
namespace: kube-system
subjects:
- kind: ServiceAccount
@ -91,7 +96,7 @@ subjects:
namespace: #@ data.values.namespace
roleRef:
kind: Role
name: #@ data.values.app_name + "-kube-system-pod-exec"
name: #@ data.values.app_name + "-kube-system-pod-read"
apiGroup: rbac.authorization.k8s.io
#! Allow both authenticated and unauthenticated TokenCredentialRequests (i.e. allow all requests)

View File

@ -0,0 +1,39 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package dynamiccertauthority implements an x509 certificate authority capable of issuing
// certificates from a dynamically updating CA keypair.
package dynamiccertauthority
import (
"crypto/x509/pkix"
"time"
"go.pinniped.dev/internal/certauthority"
"go.pinniped.dev/internal/dynamiccert"
)
// CA is a type capable of issuing certificates.
type CA struct {
provider dynamiccert.Provider
}
// New creates a new CA, ready to issue certs whenever the provided dynamiccert.Provider has a
// keypair available.
func New(provider dynamiccert.Provider) *CA {
return &CA{
provider: provider,
}
}
// IssuePEM issues a new server certificate for the given identity and duration, returning it as a
// pair of PEM-formatted byte slices for the certificate and private key.
func (c *CA) IssuePEM(subject pkix.Name, dnsNames []string, ttl time.Duration) ([]byte, []byte, error) {
caCrtPEM, caKeyPEM := c.provider.CurrentCertKeyContent()
ca, err := certauthority.Load(string(caCrtPEM), string(caKeyPEM))
if err != nil {
return nil, nil, err
}
return ca.IssuePEM(subject, dnsNames, ttl)
}
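
To show how this package is intended to be wired up, here is a minimal usage sketch based only on the dynamiccert.Provider calls that appear elsewhere in this diff (New, Set, CurrentCertKeyContent); the PEM inputs, common name, and DNS name are placeholders rather than values from this commit:

// Sketch only: wire a dynamiccert.Provider into the dynamic CA and issue a serving cert.
package example

import (
    "crypto/x509/pkix"
    "time"

    "go.pinniped.dev/internal/dynamiccert"
    "go.pinniped.dev/internal/dynamiccertauthority"
)

// issueServingCert assumes caCertPEM and caKeyPEM already hold a PEM-encoded CA keypair.
func issueServingCert(caCertPEM, caKeyPEM []byte) (certPEM, keyPEM []byte, err error) {
    // The provider starts empty; Set may be called again whenever the CA keypair rotates, and
    // IssuePEM picks up the rotated keypair because it reloads the provider's content on every call.
    provider := dynamiccert.New()
    provider.Set(caCertPEM, caKeyPEM)

    ca := dynamiccertauthority.New(provider)
    return ca.IssuePEM(
        pkix.Name{CommonName: "example-serving-cert"}, // placeholder identity
        []string{"example.pinniped.dev"},              // placeholder DNS name
        24*time.Hour,
    )
}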

View File

@ -0,0 +1,127 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package dynamiccertauthority
import (
"crypto/x509/pkix"
"io/ioutil"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/testutil"
)
func TestCAIssuePEM(t *testing.T) {
t.Parallel()
provider := dynamiccert.New()
ca := New(provider)
steps := []struct {
name string
caCrtPath, caKeyPath string
wantError string
}{
{
name: "no cert+key",
wantError: "could not load CA: tls: failed to find any PEM data in certificate input",
},
{
name: "only cert",
caCrtPath: "testdata/ca-0.crt",
wantError: "could not load CA: tls: failed to find any PEM data in key input",
},
{
name: "only key",
caKeyPath: "testdata/ca-0.key",
wantError: "could not load CA: tls: failed to find any PEM data in certificate input",
},
{
name: "new cert+key",
caCrtPath: "testdata/ca-0.crt",
caKeyPath: "testdata/ca-0.key",
},
{
name: "same cert+key",
},
{
name: "another new cert+key",
caCrtPath: "testdata/ca-1.crt",
caKeyPath: "testdata/ca-1.key",
},
{
name: "bad cert",
caCrtPath: "testdata/ca-bad.crt",
caKeyPath: "testdata/ca-0.key",
wantError: "could not load CA: tls: failed to find any PEM data in certificate input",
},
{
name: "bad key",
caCrtPath: "testdata/ca-0.crt",
caKeyPath: "testdata/ca-bad.key",
wantError: "could not load CA: tls: failed to find any PEM data in key input",
},
{
name: "mismatch cert+key",
caCrtPath: "testdata/ca-0.crt",
caKeyPath: "testdata/ca-1.key",
wantError: "could not load CA: tls: private key does not match public key",
},
{
name: "good cert+key again",
caCrtPath: "testdata/ca-1.crt",
caKeyPath: "testdata/ca-1.key",
},
}
for _, step := range steps {
step := step
t.Run(step.name, func(t *testing.T) {
var caCrtPEM, caKeyPEM []byte
var err error
if step.caCrtPath != "" {
caCrtPEM, err = ioutil.ReadFile(step.caCrtPath)
require.NoError(t, err)
}
if step.caKeyPath != "" {
caKeyPEM, err = ioutil.ReadFile(step.caKeyPath)
require.NoError(t, err)
}
if step.caCrtPath != "" || step.caKeyPath != "" {
provider.Set(caCrtPEM, caKeyPEM)
} else {
caCrtPEM, _ = provider.CurrentCertKeyContent()
}
crtPEM, keyPEM, err := ca.IssuePEM(
pkix.Name{
CommonName: "some-common-name",
},
[]string{"some-dns-name", "some-other-dns-name"},
time.Hour*24,
)
if step.wantError != "" {
require.EqualError(t, err, step.wantError)
require.Empty(t, crtPEM)
require.Empty(t, keyPEM)
} else {
require.NoError(t, err)
require.NotEmpty(t, crtPEM)
require.NotEmpty(t, keyPEM)
crtAssertions := testutil.ValidateCertificate(t, string(caCrtPEM), string(crtPEM))
crtAssertions.RequireCommonName("some-common-name")
crtAssertions.RequireDNSName("some-dns-name")
crtAssertions.RequireDNSName("some-other-dns-name")
crtAssertions.RequireLifetime(time.Now(), time.Now().Add(time.Hour*24), time.Minute*10)
crtAssertions.RequireMatchesPrivateKey(string(keyPEM))
}
})
}
}

View File

@ -0,0 +1,17 @@
-----BEGIN CERTIFICATE-----
MIICxjCCAa4CCQCawPQa0TyOmDANBgkqhkiG9w0BAQsFADAlMQ0wCwYDVQQDDAR0
dW5hMRQwEgYDVQQKDAtmaXNoLG1hcmxpbjAeFw0yMDA5MjMxMzMwMzZaFw0yMDEw
MjMxMzMwMzZaMCUxDTALBgNVBAMMBHR1bmExFDASBgNVBAoMC2Zpc2gsbWFybGlu
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxwLD08r2ZWh9FMGqq82O
CwraRKafmdgFk0qTpWS48di2A/Jz5R0zgx0fHYTs+pCL5FhfpDhTCYGTWMoXI2bW
7n6prDwrfNE+bsKdekEHM8MugdSFm2cPJONS7upIGODDIvK3zVGCg4Y0H/nE5Zf2
g+GrrY31a4tWYkS9b2J7ZxQlQ2ixsxICHvDN1JYwg6a90txOKJAJBK9lxtcY1//p
Dx0v/norGt92chMRq/QZYDELs2h2KoQeXmPYR7/MZYCavjV3Dcc55ueUksTQjusI
INYiYh/Jm3IKBSUjl0TjQaJz/q4+JOPb1D7aQIggOTuCTPXb858DHW0lgo64H3Zf
nQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB7qwje3H5Aw3OFssPuc7cIUrW7ohU9
mcKAq5T5KWRsX3buCkLRJO9BOhBpRIM2jcHSBZupC/JiwFwLMCA5/l0GE++9WLFE
9ta2eTjTV5Og7wwpWlljD0uqZ/IROEV206Nvs0k3L3XhM1eoz3MESh7Esmr3ZSwK
pNATzBhvhoYNRMYBM4Yy3poBzweLFlLnNW98jI4Gp+A/fA38cqVdmNXFsfNSpesu
nesMwBleDec2o8adOs3LtXhfufDLVrmpRJgYqRhgQe1X+PicdCeGRAEN+fH4PF2i
qza2f08YEWiobqdVsN9oIWupyobPrfEnuLjj1xYNSf4LTKvVUqeK7F8H
-----END CERTIFICATE-----

View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDHAsPTyvZlaH0U
waqrzY4LCtpEpp+Z2AWTSpOlZLjx2LYD8nPlHTODHR8dhOz6kIvkWF+kOFMJgZNY
yhcjZtbufqmsPCt80T5uwp16QQczwy6B1IWbZw8k41Lu6kgY4MMi8rfNUYKDhjQf
+cTll/aD4autjfVri1ZiRL1vYntnFCVDaLGzEgIe8M3UljCDpr3S3E4okAkEr2XG
1xjX/+kPHS/+eisa33ZyExGr9BlgMQuzaHYqhB5eY9hHv8xlgJq+NXcNxznm55SS
xNCO6wgg1iJiH8mbcgoFJSOXRONBonP+rj4k49vUPtpAiCA5O4JM9dvznwMdbSWC
jrgfdl+dAgMBAAECggEATe9OQyXLomDuAu/o54kDJF3mplXeUMAhRtJAydeM+eEV
Pqx3KHVsE9+WrNe+ek2bCxx0r/oCwslEq9PQheOjLhjaV4HkweernHKIezT2HhZE
o0FX5UjRzG9drgR/oYZ7xLbqtfRCuUEYLqGAsyz0tCbvNXNPdgNGHAuxZ+pYS0AN
6DPOJKG1gUNSAzjU7U6g1UtOxr/qXVSZvBM+F7nTsbRKlnq/RYvLvfGLfnkQZvom
QxcyZmiC6fgmrpiCPN9poUKmB6jt3IWQJ9qyxadWI612fYrHWMEcXgNcxxOvCxyI
S1cGAq9Vw51pvGLlXsGzci3i4qemEDl8TDIp46C0oQKBgQD3ImywgoJn+IvTwtml
vFNfJ3s/J61SFv32u4iR9lim118mzvOi7FphjCfCj72cppfiuzI/F3Bsv0eR2qv9
9eXahvQe8egZvWPnW+f2GRYOAbHq/SmYA5kH2hHXOPHdJvfYj+fnEU+y6XXbFtp6
MLvmbgrPL6le1UHri3XPpcOjNQKBgQDOJmS+yK+ozZTpkNXYq2GNaVr5LaUhOePV
y+hvNj1IlhU7R97PcBe59izO73sYIUeOCmGM2cX3zGy7+upSyT2K3+Jh5BVpnwME
OqyrYFXfMJxwf9SuD9GJRiOtxtglBxcjNZEcninV1qynuhC9SaHjyhPnzcyZR/bB
Ipy4+mKvyQKBgQDktqs4P9BIQLHHbRDYXY4bBgL9086HplerPKuLyh0Ja1DYLbc1
FOAgXwF9AmJM07DFWNGLqjmVqwClb2a1RhlPQI41BVP0Xl5TA6+NBnJuzArImzsf
QFUj+yF/uWe9cA74EVZhdpf30DAObvwLEYcUHstKK6Xn6h1zzEFfxt1j8QKBgHjr
2Gh4e2E+xbyDcoyXaq9yPySue5ATzurXos2pOSVcs7OEItP24lP4bKwtmTy8OKa+
vB3Ml+0Ugit5sP1CgdD2JmpZSZ4c4b4XVLVp2rUCZKEwumYnbTdiZwdF/f8qO44x
m573v35pX/k6kRsXF9jv7eEovHyk077SOK/gXwbRAoGBAO5HfelAfPG4CFZdvj5z
sCCf8rr1Jpyk+1hILrI8hQUriL3bwStRppQHJajfswjZKP9pjyJXi/cWp7DvKBpo
DFGikRWzbSP/xkRNRW35bvGlcy8oit395dQJZOemrf9KoHQPLGT2pEnLDtqOtJAQ
YsquGd2q0TmDBVolI62cHjr2
-----END PRIVATE KEY-----

View File

@ -0,0 +1,17 @@
-----BEGIN CERTIFICATE-----
MIICxjCCAa4CCQC6B88ml9yfhzANBgkqhkiG9w0BAQsFADAlMQ0wCwYDVQQDDAR0
dW5hMRQwEgYDVQQKDAtmaXNoLG1hcmxpbjAeFw0yMDA5MjMxMzMwNDRaFw0yMDEw
MjMxMzMwNDRaMCUxDTALBgNVBAMMBHR1bmExFDASBgNVBAoMC2Zpc2gsbWFybGlu
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4rabilzx1UHfD52mRquC
S1t7YyeTrPWgS2VgUgD2NcghL0TL85P17KUh+OcEJ5N5QQMXgs8WMotnP786pg/Z
yX9U+KGAMIRla3k+YgX5qd45nN4ZMsCspK2gnfQbqbaPKA6oXRxXbqgrVy+kTx0/
QylSHrwOEZacObPhQ5wM+RGlkSCUJ4hlRr6POOLOSwqlcw7Uxw0I482FTK11jAaY
0EgItpW0ROsv7j6RhomGGGSuPa58qe9zwzz9nR8urnCJ4m+5Kw/hae3FxTSrOMmc
OiOXwyzEjihTax7Afvpe9rfjjXLZpz7EXyCmeA0BlFwjvuDx2oZ+/WitC/zUMKvM
XQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCA0DJsNEJC6sZletC+c6qHRKRjJDMY
0nsXaV6rFPASk+956rI+1DtBuc54UmhgO+VhoVda6lYeBUiXC7lMSU5T9MtFOgLA
OhWVSwL9XveLOEc2JrBaI8NKG2V9E0b5d6TJeIvxotv2En0gJIvachQy4JwM3fKr
mITVqeqvhWczLoFNIXmwYpIwqzhF6t2QmV7ecwXi8g0hoX9d9JVDs6qtvudlplIz
TjefZ6tNpNpXKBQCSI44ZydyNelz5q/A9j7iyG9tmU8jrCouii2DkUoFblrX/x+Q
bZ7was9gqUshQ3IHBxfynuNkKvT0VMSfpBFcTipW7QVFatCxZHb8idFD
-----END CERTIFICATE-----

View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDitpuKXPHVQd8P
naZGq4JLW3tjJ5Os9aBLZWBSAPY1yCEvRMvzk/XspSH45wQnk3lBAxeCzxYyi2c/
vzqmD9nJf1T4oYAwhGVreT5iBfmp3jmc3hkywKykraCd9Bupto8oDqhdHFduqCtX
L6RPHT9DKVIevA4Rlpw5s+FDnAz5EaWRIJQniGVGvo844s5LCqVzDtTHDQjjzYVM
rXWMBpjQSAi2lbRE6y/uPpGGiYYYZK49rnyp73PDPP2dHy6ucInib7krD+Fp7cXF
NKs4yZw6I5fDLMSOKFNrHsB++l72t+ONctmnPsRfIKZ4DQGUXCO+4PHahn79aK0L
/NQwq8xdAgMBAAECggEADnosuowu4ThgnyWpDZA8NMW1vmIzmvLgdpAVs4beMhcC
j3ssLvS+2oq8/aD68fAH6S/iW3cP6tBeGoMCosIWXTilW28suWxq8Y7/fdD13XQU
Z8EDCOum2qk/vXZuIctHnv44oCGEL8vLYLjpHCg49vf3h4DowRTGCyVCeEfJyEnK
ZUZzKtmohS88IhOfOoNC8qvXGhdJmbNmFngJqW1Z+4qozxpAYzjr4HwqFJF/YeQh
xKxjKSS5ikxdPqySdRxuUsIqFe72ZPIVo2NxVPkY+w75DZAqKDkemrDETM8TirMb
3n1lEabpiY1kWTF+UcthKhN4RV4RwYIRNFw/kpS7vQKBgQD77OPOSBsHqEw+Gykd
kVhTXJxspYI8pNQft1HvnlCNAEUsdT5jSW9O2PGjkUwsKQvSXGuMmYtnHdRv84G7
/or+yyZw2P3T1V01220Vj+fQh2YCrHqSEcmHlA7XSZblj4qs9rOrj5jRQbM8T78H
r09bum1wxIjHdRebPDCUATwFNwKBgQDmYVNwtF+kCUQNYYSDolxJSH9jjCOfGkWR
RHtimsM75jzzg8TX5XGJCJMO0rQ1k2A4YdVEwNCGUEOx/uX9Ey8LSyEaUGp8bXJ9
1sAjjPY6xmEV0gyQTFWR7EQHc/F90i+QaYaAbOvq479jwoamcKkDoY9wJsrQsxX/
AZAvX66FCwKBgHSYwyaqALiekAr+jxz8NCEA7/To9UoUD4lOU0HPyAA6a8mTyfgx
+K5JgizuBo85wBrwY2aDeh2TlMrrrNPRj4s1gukfxPrR+X3/vJEKNYQx5mi+Y0xP
pzJ0uBm0GX7N3KUI2UdCArx25/m1/vgTj2so8ZYLoDKQYwiZ5vHZUGopAoGBAOAZ
MFdjvd0M/ludzo9Vcjv+/5vQCB6OKbnDSdqC/QLZVdTzCpmQoT5RTuuOXqn28BQo
ZpJ4lN7yWMdeUk45SPvNWygDrXHX7RUnvsNWLXbC3lGhf4MmHd7SWuJ9EB36RTZO
z/1A9sQIQnZCFUT2NhJIKAVKVuNoMS9bT+wlQNg9AoGBALBj/5zbc2swgh1rB0Ob
7AQgPi9KHnsJcpZrY0jVwzQgQkYwtuaL4PL5eduNM/feaEOl54U3BCm5txa04H8N
Anp6RGPM3wHKSa2k0e3PtvICp/mL767kbxCuKcb1NhSpgAXAV5GLZNLxsWN5yJnZ
TVgvHlTpm6RMm7zDYHYyGFiO
-----END PRIVATE KEY-----

View File

@ -0,0 +1 @@
this is not a certificate

View File

@ -0,0 +1 @@
this is not a key

View File

@ -1,233 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package kubecertauthority implements a signer backed by the kubernetes controller-manager signing
// keys (accessed via the kubernetes Exec API).
package kubecertauthority
import (
"bytes"
"context"
"crypto/x509/pkix"
"fmt"
"sync"
"time"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/deprecated/scheme"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/klog/v2"
"go.pinniped.dev/internal/certauthority"
"go.pinniped.dev/internal/constable"
)
// ErrNoKubeControllerManagerPod is returned when no kube-controller-manager pod is found on the cluster.
const ErrNoKubeControllerManagerPod = constable.Error("did not find kube-controller-manager pod")
const ErrIncapableOfIssuingCertificates = constable.Error("this cluster is not currently capable of issuing certificates")
const k8sAPIServerCACertPEMDefaultPath = "/etc/kubernetes/ca/ca.pem"
const k8sAPIServerCAKeyPEMDefaultPath = "/etc/kubernetes/ca/ca.key"
type signer interface {
IssuePEM(subject pkix.Name, dnsNames []string, ttl time.Duration) ([]byte, []byte, error)
}
type PodCommandExecutor interface {
Exec(podNamespace string, podName string, commandAndArgs ...string) (stdoutResult string, err error)
}
type kubeClientPodCommandExecutor struct {
kubeConfig *restclient.Config
kubeClient kubernetes.Interface
}
func NewPodCommandExecutor(kubeConfig *restclient.Config, kubeClient kubernetes.Interface) PodCommandExecutor {
return &kubeClientPodCommandExecutor{kubeConfig: kubeConfig, kubeClient: kubeClient}
}
func (s *kubeClientPodCommandExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
request := s.kubeClient.
CoreV1().
RESTClient().
Post().
Namespace(podNamespace).
Resource("pods").
Name(podName).
SubResource("exec").
VersionedParams(&v1.PodExecOptions{
Stdin: false,
Stdout: true,
Stderr: false,
TTY: false,
Command: commandAndArgs,
}, scheme.ParameterCodec)
executor, err := remotecommand.NewSPDYExecutor(s.kubeConfig, "POST", request.URL())
if err != nil {
return "", err
}
var stdoutBuf bytes.Buffer
if err := executor.Stream(remotecommand.StreamOptions{Stdout: &stdoutBuf}); err != nil {
return "", err
}
return stdoutBuf.String(), nil
}
type CA struct {
kubeClient kubernetes.Interface
podCommandExecutor PodCommandExecutor
shutdown, done chan struct{}
onSuccessfulRefresh SuccessCallback
onFailedRefresh FailureCallback
lock sync.RWMutex
activeSigner signer
}
type ShutdownFunc func()
type SuccessCallback func()
type FailureCallback func(error)
// New creates a new instance of a CA. It tries to load the kube API server's private key
// immediately. If that succeeds then it calls the success callback and it is ready to issue certs.
// When it fails to get the kube API server's private key, then it calls the failure callback and
// it will try again on the next tick. It starts a goroutine to periodically reload the kube
// API server's private key in case it failed previously or in case the key has changed. It returns
// a function that can be used to shut down that goroutine. Future attempts made by that goroutine
// to get the key will also result in success or failure callbacks.
func New(
kubeClient kubernetes.Interface,
podCommandExecutor PodCommandExecutor,
tick <-chan time.Time,
onSuccessfulRefresh SuccessCallback,
onFailedRefresh FailureCallback,
) (*CA, ShutdownFunc) {
signer, err := createSignerWithAPIServerSecret(kubeClient, podCommandExecutor)
if err != nil {
klog.Errorf("could not initially fetch the API server's signing key: %s", err)
signer = nil
onFailedRefresh(err)
} else {
onSuccessfulRefresh()
}
result := &CA{
kubeClient: kubeClient,
podCommandExecutor: podCommandExecutor,
shutdown: make(chan struct{}),
done: make(chan struct{}),
onSuccessfulRefresh: onSuccessfulRefresh,
onFailedRefresh: onFailedRefresh,
activeSigner: signer,
}
go result.refreshLoop(tick)
return result, result.shutdownRefresh
}
func createSignerWithAPIServerSecret(kubeClient kubernetes.Interface, podCommandExecutor PodCommandExecutor) (signer, error) {
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
pod, err := findControllerManagerPod(ctx, kubeClient)
if err != nil {
return nil, err
}
certPath, keyPath := getKeypairFilePaths(pod)
certPEM, err := podCommandExecutor.Exec(pod.Namespace, pod.Name, "cat", certPath)
if err != nil {
return nil, err
}
keyPEM, err := podCommandExecutor.Exec(pod.Namespace, pod.Name, "cat", keyPath)
if err != nil {
return nil, err
}
return certauthority.Load(certPEM, keyPEM)
}
func (c *CA) refreshLoop(tick <-chan time.Time) {
for {
select {
case <-c.shutdown:
close(c.done)
return
case <-tick:
c.updateSigner()
}
}
}
func (c *CA) updateSigner() {
newSigner, err := createSignerWithAPIServerSecret(c.kubeClient, c.podCommandExecutor)
if err != nil {
klog.Errorf("could not create signer with API server secret: %s", err)
c.onFailedRefresh(err)
return
}
c.lock.Lock()
c.activeSigner = newSigner
c.lock.Unlock()
c.onSuccessfulRefresh()
}
func (c *CA) shutdownRefresh() {
close(c.shutdown)
<-c.done
}
// IssuePEM issues a new server certificate for the given identity and duration, returning it as a pair of
// PEM-formatted byte slices for the certificate and private key.
func (c *CA) IssuePEM(subject pkix.Name, dnsNames []string, ttl time.Duration) ([]byte, []byte, error) {
c.lock.RLock()
signer := c.activeSigner
c.lock.RUnlock()
if signer == nil {
return nil, nil, ErrIncapableOfIssuingCertificates
}
return signer.IssuePEM(subject, dnsNames, ttl)
}
func findControllerManagerPod(ctx context.Context, kubeClient kubernetes.Interface) (*v1.Pod, error) {
pods, err := kubeClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
LabelSelector: "component=kube-controller-manager",
FieldSelector: "status.phase=Running",
})
if err != nil {
return nil, fmt.Errorf("could not check for kube-controller-manager pod: %w", err)
}
for _, pod := range pods.Items {
return &pod, nil
}
return nil, ErrNoKubeControllerManagerPod
}
func getKeypairFilePaths(pod *v1.Pod) (string, string) {
certPath := getContainerArgByName(pod, "cluster-signing-cert-file", k8sAPIServerCACertPEMDefaultPath)
keyPath := getContainerArgByName(pod, "cluster-signing-key-file", k8sAPIServerCAKeyPEMDefaultPath)
return certPath, keyPath
}
func getContainerArgByName(pod *v1.Pod, name string, defaultValue string) string {
for _, container := range pod.Spec.Containers {
flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
var val string
flagset.StringVar(&val, name, "", "")
_ = flagset.Parse(append(container.Command, container.Args...))
if val != "" {
return val
}
}
return defaultValue
}

View File

@ -1,449 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertauthority
import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"sync"
"testing"
"time"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubernetesfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2"
"go.pinniped.dev/internal/testutil"
)
type fakePodExecutor struct {
resultsToReturn []string
errorsToReturn []error
calledWithPodName []string
calledWithPodNamespace []string
calledWithCommandAndArgs [][]string
callCount int
}
func (s *fakePodExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
s.calledWithPodNamespace = append(s.calledWithPodNamespace, podNamespace)
s.calledWithPodName = append(s.calledWithPodName, podName)
s.calledWithCommandAndArgs = append(s.calledWithCommandAndArgs, commandAndArgs)
result := s.resultsToReturn[s.callCount]
var err error = nil
if s.errorsToReturn != nil {
err = s.errorsToReturn[s.callCount]
}
s.callCount++
if err != nil {
return "", err
}
return result, nil
}
type callbackRecorder struct {
numberOfTimesSuccessCalled int
numberOfTimesFailureCalled int
failureErrors []error
mutex sync.Mutex
}
func (c *callbackRecorder) OnSuccess() {
c.mutex.Lock()
defer c.mutex.Unlock()
c.numberOfTimesSuccessCalled++
}
func (c *callbackRecorder) OnFailure(err error) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.numberOfTimesFailureCalled++
c.failureErrors = append(c.failureErrors, err)
}
func (c *callbackRecorder) NumberOfTimesSuccessCalled() int {
c.mutex.Lock()
defer c.mutex.Unlock()
return c.numberOfTimesSuccessCalled
}
func (c *callbackRecorder) NumberOfTimesFailureCalled() int {
c.mutex.Lock()
defer c.mutex.Unlock()
return c.numberOfTimesFailureCalled
}
func (c *callbackRecorder) FailureErrors() []error {
c.mutex.Lock()
defer c.mutex.Unlock()
var errs = make([]error, len(c.failureErrors))
copy(errs, c.failureErrors)
return errs
}
func TestCA(t *testing.T) {
spec.Run(t, "CA", func(t *testing.T, when spec.G, it spec.S) {
var r *require.Assertions
var fakeCertPEM, fakeKeyPEM string
var fakeCert2PEM, fakeKey2PEM string
var fakePod *corev1.Pod
var kubeAPIClient *kubernetesfake.Clientset
var fakeExecutor *fakePodExecutor
var neverTicker <-chan time.Time
var callbacks *callbackRecorder
var logger *testutil.TranscriptLogger
var requireInitialFailureLogMessage = func(specificErrorMessage string) {
r.Len(logger.Transcript(), 1)
r.Equal(
fmt.Sprintf("could not initially fetch the API server's signing key: %s\n", specificErrorMessage),
logger.Transcript()[0].Message,
)
r.Equal(logger.Transcript()[0].Level, "error")
}
var requireNotCapableOfIssuingCerts = func(subject *CA) {
certPEM, keyPEM, err := subject.IssuePEM(
pkix.Name{CommonName: "Test Server"},
[]string{"example.com"},
10*time.Minute,
)
r.Nil(certPEM)
r.Nil(keyPEM)
r.EqualError(err, "this cluster is not currently capable of issuing certificates")
}
it.Before(func() {
r = require.New(t)
loadFile := func(filename string) string {
bytes, err := ioutil.ReadFile(filename)
r.NoError(err)
return string(bytes)
}
fakeCertPEM = loadFile("./testdata/test.crt")
fakeKeyPEM = loadFile("./testdata/test.key")
fakeCert2PEM = loadFile("./testdata/test2.crt")
fakeKey2PEM = loadFile("./testdata/test2.key")
fakePod = &corev1.Pod{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "fake-pod",
Namespace: "kube-system",
Labels: map[string]string{"component": "kube-controller-manager"},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{Name: "kube-controller-manager"}},
},
Status: corev1.PodStatus{
Phase: "Running",
},
}
kubeAPIClient = kubernetesfake.NewSimpleClientset()
fakeExecutor = &fakePodExecutor{
resultsToReturn: []string{
fakeCertPEM,
fakeKeyPEM,
fakeCert2PEM,
fakeKey2PEM,
},
}
callbacks = &callbackRecorder{}
logger = testutil.NewTranscriptLogger(t)
klog.SetLogger(logger) // this is unfortunately a global logger, so can't run these tests in parallel :(
})
it.After(func() {
klog.SetLogger(nil)
})
when("the kube-controller-manager pod is found with default CLI flag values", func() {
it.Before(func() {
err := kubeAPIClient.Tracker().Add(fakePod)
r.NoError(err)
})
when("the exec commands return the API server's keypair", func() {
it("finds the API server's signing key and uses it to issue certificates", func() {
fakeTicker := make(chan time.Time)
subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, fakeTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
r.Equal(2, fakeExecutor.callCount)
r.Equal("kube-system", fakeExecutor.calledWithPodNamespace[0])
r.Equal("fake-pod", fakeExecutor.calledWithPodName[0])
r.Equal([]string{"cat", "/etc/kubernetes/ca/ca.pem"}, fakeExecutor.calledWithCommandAndArgs[0])
r.Equal("kube-system", fakeExecutor.calledWithPodNamespace[1])
r.Equal("fake-pod", fakeExecutor.calledWithPodName[1])
r.Equal([]string{"cat", "/etc/kubernetes/ca/ca.key"}, fakeExecutor.calledWithCommandAndArgs[1])
r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
r.Equal(0, callbacks.NumberOfTimesFailureCalled())
// Validate that we can issue a certificate signed by the original API server CA.
certPEM, keyPEM, err := subject.IssuePEM(
pkix.Name{CommonName: "Test Server"},
[]string{"example.com"},
10*time.Minute,
)
r.NoError(err)
validCert := testutil.ValidateCertificate(t, fakeCertPEM, string(certPEM))
validCert.RequireDNSName("example.com")
validCert.RequireLifetime(time.Now(), time.Now().Add(10*time.Minute), 6*time.Minute)
validCert.RequireMatchesPrivateKey(string(keyPEM))
// Tick the timer and wait for another refresh loop to complete.
fakeTicker <- time.Now()
// Eventually it starts issuing certs using the new signing key.
var secondCertPEM, secondKeyPEM string
r.Eventually(func() bool {
certPEM, keyPEM, err := subject.IssuePEM(
pkix.Name{CommonName: "Test Server"},
[]string{"example.com"},
10*time.Minute,
)
r.NoError(err)
secondCertPEM = string(certPEM)
secondKeyPEM = string(keyPEM)
block, _ := pem.Decode(certPEM)
require.NotNil(t, block)
parsed, err := x509.ParseCertificate(block.Bytes)
require.NoError(t, err)
// Validate the created cert using the second API server CA.
roots := x509.NewCertPool()
require.True(t, roots.AppendCertsFromPEM([]byte(fakeCert2PEM)))
opts := x509.VerifyOptions{Roots: roots}
_, err = parsed.Verify(opts)
return err == nil
}, 5*time.Second, 100*time.Millisecond)
r.Equal(2, callbacks.NumberOfTimesSuccessCalled())
r.Equal(0, callbacks.NumberOfTimesFailureCalled())
validCert2 := testutil.ValidateCertificate(t, fakeCert2PEM, secondCertPEM)
validCert2.RequireDNSName("example.com")
validCert2.RequireLifetime(time.Now(), time.Now().Add(15*time.Minute), 6*time.Minute)
validCert2.RequireMatchesPrivateKey(secondKeyPEM)
})
})
when("the exec commands return the API server's keypair the first time but subsequently fails", func() {
it.Before(func() {
fakeExecutor.errorsToReturn = []error{nil, nil, fmt.Errorf("some exec error")}
})
it("logs an error message", func() {
fakeTicker := make(chan time.Time)
subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, fakeTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
r.Equal(2, fakeExecutor.callCount)
r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
r.Equal(0, callbacks.NumberOfTimesFailureCalled())
// Tick the timer and wait for another refresh loop to complete.
fakeTicker <- time.Now()
// Wait for there to be a log output and require that it matches our expectation.
r.Eventually(func() bool { return len(logger.Transcript()) >= 1 }, 5*time.Second, 10*time.Millisecond)
r.Contains(logger.Transcript()[0].Message, "could not create signer with API server secret: some exec error")
r.Equal(logger.Transcript()[0].Level, "error")
r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
r.Equal(1, callbacks.NumberOfTimesFailureCalled())
r.EqualError(callbacks.FailureErrors()[0], "some exec error")
// Validate that we can still issue a certificate signed by the original API server CA.
certPEM, _, err := subject.IssuePEM(
pkix.Name{CommonName: "Test Server"},
[]string{"example.com"},
10*time.Minute,
)
r.NoError(err)
testutil.ValidateCertificate(t, fakeCertPEM, string(certPEM))
})
})
when("the exec commands fail the first time but subsequently returns the API server's keypair", func() {
it.Before(func() {
fakeExecutor.errorsToReturn = []error{fmt.Errorf("some exec error"), nil, nil}
fakeExecutor.resultsToReturn = []string{"", fakeCertPEM, fakeKeyPEM}
})
it("logs an error message and fails to issue certs until it can get the API server's keypair", func() {
fakeTicker := make(chan time.Time)
subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, fakeTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
r.Equal(1, fakeExecutor.callCount)
r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
r.Equal(1, callbacks.NumberOfTimesFailureCalled())
r.EqualError(callbacks.FailureErrors()[0], "some exec error")
requireInitialFailureLogMessage("some exec error")
requireNotCapableOfIssuingCerts(subject)
// Tick the timer and wait for another refresh loop to complete.
fakeTicker <- time.Now()
// Wait until it can start to issue certs, and then validate the issued cert.
var certPEM, keyPEM []byte
r.Eventually(func() bool {
var err error
certPEM, keyPEM, err = subject.IssuePEM(
pkix.Name{CommonName: "Test Server"},
[]string{"example.com"},
10*time.Minute,
)
return err == nil
}, 5*time.Second, 10*time.Millisecond)
validCert := testutil.ValidateCertificate(t, fakeCertPEM, string(certPEM))
validCert.RequireDNSName("example.com")
validCert.RequireLifetime(time.Now().Add(-5*time.Minute), time.Now().Add(10*time.Minute), 1*time.Minute)
validCert.RequireMatchesPrivateKey(string(keyPEM))
r.Equal(1, callbacks.NumberOfTimesSuccessCalled())
r.Equal(1, callbacks.NumberOfTimesFailureCalled())
})
})
when("the exec commands succeed but return garbage", func() {
it.Before(func() {
fakeExecutor.resultsToReturn = []string{"not a cert", "not a private key"}
})
it("returns a CA who cannot issue certs", func() {
subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
requireInitialFailureLogMessage("could not load CA: tls: failed to find any PEM data in certificate input")
requireNotCapableOfIssuingCerts(subject)
r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
r.Equal(1, callbacks.NumberOfTimesFailureCalled())
r.EqualError(callbacks.FailureErrors()[0], "could not load CA: tls: failed to find any PEM data in certificate input")
})
})
when("the first exec command returns an error", func() {
it.Before(func() {
fakeExecutor.errorsToReturn = []error{fmt.Errorf("some error"), nil}
})
it("returns a CA who cannot issue certs", func() {
subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
requireInitialFailureLogMessage("some error")
requireNotCapableOfIssuingCerts(subject)
r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
r.Equal(1, callbacks.NumberOfTimesFailureCalled())
r.EqualError(callbacks.FailureErrors()[0], "some error")
})
})
when("the second exec command returns an error", func() {
it.Before(func() {
fakeExecutor.errorsToReturn = []error{nil, fmt.Errorf("some error")}
})
it("returns a CA who cannot issue certs", func() {
subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
requireInitialFailureLogMessage("some error")
requireNotCapableOfIssuingCerts(subject)
r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
r.Equal(1, callbacks.NumberOfTimesFailureCalled())
r.EqualError(callbacks.FailureErrors()[0], "some error")
})
})
})
when("the kube-controller-manager pod is found with non-default CLI flag values", func() {
it.Before(func() {
fakePod.Spec.Containers[0].Command = []string{
"kube-controller-manager",
"--cluster-signing-cert-file=/etc/kubernetes/ca/non-default.pem",
}
fakePod.Spec.Containers[0].Args = []string{
"--cluster-signing-key-file=/etc/kubernetes/ca/non-default.key",
}
err := kubeAPIClient.Tracker().Add(fakePod)
r.NoError(err)
})
it("finds the API server's signing key and uses it to issue certificates", func() {
_, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
r.Equal(2, fakeExecutor.callCount)
r.Equal("kube-system", fakeExecutor.calledWithPodNamespace[0])
r.Equal("fake-pod", fakeExecutor.calledWithPodName[0])
r.Equal([]string{"cat", "/etc/kubernetes/ca/non-default.pem"}, fakeExecutor.calledWithCommandAndArgs[0])
r.Equal("kube-system", fakeExecutor.calledWithPodNamespace[1])
r.Equal("fake-pod", fakeExecutor.calledWithPodName[1])
r.Equal([]string{"cat", "/etc/kubernetes/ca/non-default.key"}, fakeExecutor.calledWithCommandAndArgs[1])
})
})
when("the kube-controller-manager pod is found with non-default CLI flag values separated by spaces", func() {
it.Before(func() {
fakePod.Spec.Containers[0].Command = []string{
"kube-controller-manager",
"--cluster-signing-cert-file", "/etc/kubernetes/ca/non-default.pem",
"--cluster-signing-key-file", "/etc/kubernetes/ca/non-default.key",
"--foo=bar",
}
err := kubeAPIClient.Tracker().Add(fakePod)
r.NoError(err)
})
it("finds the API server's signing key and uses it to issue certificates", func() {
_, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
r.Equal(2, fakeExecutor.callCount)
r.Equal("kube-system", fakeExecutor.calledWithPodNamespace[0])
r.Equal("fake-pod", fakeExecutor.calledWithPodName[0])
r.Equal([]string{"cat", "/etc/kubernetes/ca/non-default.pem"}, fakeExecutor.calledWithCommandAndArgs[0])
r.Equal("kube-system", fakeExecutor.calledWithPodNamespace[1])
r.Equal("fake-pod", fakeExecutor.calledWithPodName[1])
r.Equal([]string{"cat", "/etc/kubernetes/ca/non-default.key"}, fakeExecutor.calledWithCommandAndArgs[1])
})
})
when("the kube-controller-manager pod is not found", func() {
it("returns an error", func() {
subject, shutdownFunc := New(kubeAPIClient, fakeExecutor, neverTicker, callbacks.OnSuccess, callbacks.OnFailure)
defer shutdownFunc()
requireInitialFailureLogMessage("did not find kube-controller-manager pod")
requireNotCapableOfIssuingCerts(subject)
r.Equal(0, callbacks.NumberOfTimesSuccessCalled())
r.Equal(1, callbacks.NumberOfTimesFailureCalled())
r.EqualError(callbacks.FailureErrors()[0], "did not find kube-controller-manager pod")
})
})
}, spec.Sequential(), spec.Report(report.Terminal{}))
}

View File

@ -1,17 +0,0 @@
-----BEGIN CERTIFICATE-----
MIICyDCCAbCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl
cm5ldGVzMB4XDTIwMDgxODE2NDEzNloXDTMwMDgxNjE2NDEzNlowFTETMBEGA1UE
AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALH7
C2JpttDi3mxpD4bd+BZucCrS8XF2YwqYAr42HePp++PBnlUFqWmtPc9/bmo+7+7z
iAAlnAV0pJWP+HR/PskX8MRcFAA1HoXLa37Q4SuBBQG+JE+AeaOObmQYaCFv55ej
UF4+JIoQOdlbYEMYSI07el0cxQL4Io/CHJ3p7AtNElxjDuMK4B9W8NiCse3p7Uf+
Qje4we1TYOfcpAM0jpBPHK9vCBCpX+j52S5DUTRVIk9kye3lCDmWOXH/fhj/aJTM
1MP/hThbl2wIbFuv1bpa0kXNZs8xB63dtqROQ+lCghDmuayRmzwXl2PX6IgFFcjV
yAgjXrZqjihs+mY8eT0CAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB
/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE+Saqk2EyuIx1rxFWrOwpTi5q/B
p/TwEtrmrFIRVPnGeBnhyfbGXPDMkzIY1mEvztu8H+pm5RPyhQYLsuwzYiYMQyxX
yL9VvO7uydn7+3zX7oknQ5qAvN3nmItNyOKw3MRIKGsySNuTQ5JPtU/ufGlEivbK
vNaDBqjKrBvwhIKMdV9/xYSyeBhSSWr/6W1tAk+XbHhQH1M78rdwGN5SI75L4FGu
13kn/W2n8pE17TAY88B1YGKhsLSvf8KrFNYv+UUmzh2WstECKSlnbrSM+boMlGJn
XahE8M23fieB+SaenQdOezrY4GAnXQ3qToDlhdYAOkWhcGDct47VRM93whY=
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAsfsLYmm20OLebGkPht34Fm5wKtLxcXZjCpgCvjYd4+n748Ge
VQWpaa09z39uaj7v7vOIACWcBXSklY/4dH8+yRfwxFwUADUehctrftDhK4EFAb4k
T4B5o45uZBhoIW/nl6NQXj4kihA52VtgQxhIjTt6XRzFAvgij8IcnensC00SXGMO
4wrgH1bw2IKx7entR/5CN7jB7VNg59ykAzSOkE8cr28IEKlf6PnZLkNRNFUiT2TJ
7eUIOZY5cf9+GP9olMzUw/+FOFuXbAhsW6/VulrSRc1mzzEHrd22pE5D6UKCEOa5
rJGbPBeXY9foiAUVyNXICCNetmqOKGz6Zjx5PQIDAQABAoIBAD06klYO7De8dKxz
EEZjgn+lCq2Q2EMiaTwxw2/QikPoMSHPcDrrsbaLROJngoLGmCBqY3U5ew1dbWmO
l/jr9ZuUwt2ql67il1eL/bUpAu3GewR4d2FqX25nB48j3l7ycof2RSXG1ycwIdam
2tz6M6tytMvno9c7qhguvU2ONghEreXG3YYLdf9l97aB+p6GdXhwty22b7tAVwp1
GKn79kVYgmL86lph9hBPqtHuG1LHZUiFodr2iWXSu3H/265OD58a33ZO3iyfFI0s
PPy87ZN0r+1hGpoKKkDe63udOYgAG6xmIea/1Pdn9Eg87tueoeC7XcUpdaCJlKaF
tqCusEECgYEA60rWyXxTFKJ4QdVaqXoWMA4cQkT73RxznSKwkN/Svk8TVv+p5s5Y
oYKN4qyMzxvQzu+QNWpd1yTveCmmEynz457ELpGtidtiJdm7xZMdMGrU02eCL9mZ
ERbtfAkbEAKvN8D73fWyzghKv4dgcQptmsqZlYYc4vpwHveK+/N5lukCgYEAwaT3
iMTWCv7Vp87iKrzNUAH4iBWlazwbE+EDEnHVw26Y82fhgEgxiU2UvFSaIVhGpaCz
MYSXSdRcQTHgCoJLPfWHUHTJPqf36KfAJfdaxxjzTTbNYjUOkdcUD1bcNrm0yjoY
nR4zK1FPw86ODMYtBpfkyL7ZX8G1v5pRL/6/gzUCgYBzgwQ7Wmu3H6QGPeYKecNW
yDabWh6ECKnBpPwlw5xEjbGi7lTM2NSuRde+RpPCQZebYATeFGAJdTqTNW8wzVHM
l28cpawal7dxeZkzf+u+j1P4jUJel2cL+sOQNzAwBgFbT8TWzP6BI5T+vklcdZAl
g/0uaO7Zh7Vvnnt/AaLZsQKBgGfbHzuGPjoFdPecOKatPfxUIkRyP5bk1KzzuF8T
GI/JaFTbeREBJzg5mLTtNwD9RF6ecpzzPOTG9Xet1Tgtq0cewSUAjdKB6a8pESAL
qu8vTYYzBzJNvHOxg7u6XT8omHMBd6QEx3LLGFmvFXZ6bzmjC3wzB4iY7u5FSJfS
LEqlAoGAb0rbJ85vrJopbx8lzhJjyaJfM8+A3oQg1K3TphHrcgkUc8qx8QEosziM
wzYKSBlXd2bxMibyd0mTEMNl4/BqofaKoqof9gBIbkamwXOO8s7IgKxQAfr1R/z8
tHBW/g0QWPB+qtaVDtHwyQLlxjx8HD7atIo8d/do9ruwVaf+r6g=
-----END RSA PRIVATE KEY-----

View File

@ -12,20 +12,20 @@ import (
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/provider"
"go.pinniped.dev/internal/dynamiccert"
)
type certsObserverController struct {
namespace string
certsSecretResourceName string
dynamicCertProvider provider.DynamicTLSServingCertProvider
dynamicCertProvider dynamiccert.Provider
secretInformer corev1informers.SecretInformer
}
func NewCertsObserverController(
namespace string,
certsSecretResourceName string,
dynamicCertProvider provider.DynamicTLSServingCertProvider,
dynamicCertProvider dynamiccert.Provider,
secretInformer corev1informers.SecretInformer,
withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {

View File

@ -17,7 +17,7 @@ import (
kubernetesfake "k8s.io/client-go/kubernetes/fake"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/provider"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/testutil"
)
@ -107,7 +107,7 @@ func TestObserverControllerSync(t *testing.T) {
var timeoutContext context.Context
var timeoutContextCancel context.CancelFunc
var syncContext *controllerlib.Context
var dynamicCertProvider provider.DynamicTLSServingCertProvider
var dynamicCertProvider dynamiccert.Provider
// Defer starting the informers until the last possible moment so that the
// nested Before's can keep adding things to the informer caches.
@ -143,7 +143,7 @@ func TestObserverControllerSync(t *testing.T) {
kubeInformerClient = kubernetesfake.NewSimpleClientset()
kubeInformers = kubeinformers.NewSharedInformerFactory(kubeInformerClient, 0)
dynamicCertProvider = provider.NewDynamicTLSServingCertProvider()
dynamicCertProvider = dynamiccert.New()
})
it.After(func() {

View File

@ -8,6 +8,7 @@ import (
"fmt"
"k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
@ -34,14 +35,31 @@ func CreateOrUpdateCredentialIssuerConfig(
return fmt.Errorf("get failed: %w", err)
}
return createOrUpdateCredentialIssuerConfig(
ctx,
existingCredentialIssuerConfig,
notFound,
credentialIssuerConfigResourceName,
credentialIssuerConfigNamespace,
pinnipedClient,
applyUpdatesToCredentialIssuerConfigFunc)
credentialIssuerConfigsClient := pinnipedClient.ConfigV1alpha1().CredentialIssuerConfigs(credentialIssuerConfigNamespace)
if notFound {
// Create it
credentialIssuerConfig := minimalValidCredentialIssuerConfig(credentialIssuerConfigResourceName, credentialIssuerConfigNamespace)
applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
if _, err := credentialIssuerConfigsClient.Create(ctx, credentialIssuerConfig, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("create failed: %w", err)
}
} else {
// Already exists, so check to see if we need to update it
credentialIssuerConfig := existingCredentialIssuerConfig.DeepCopy()
applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
if equality.Semantic.DeepEqual(existingCredentialIssuerConfig, credentialIssuerConfig) {
// Nothing interesting would change as a result of this update, so skip it
return nil
}
if _, err := credentialIssuerConfigsClient.Update(ctx, credentialIssuerConfig, metav1.UpdateOptions{}); err != nil {
return err
}
}
return nil
})
if err != nil {
@ -50,43 +68,6 @@ func CreateOrUpdateCredentialIssuerConfig(
return nil
}
func createOrUpdateCredentialIssuerConfig(
ctx context.Context,
existingCredentialIssuerConfig *configv1alpha1.CredentialIssuerConfig,
notFound bool,
credentialIssuerConfigName string,
credentialIssuerConfigNamespace string,
pinnipedClient pinnipedclientset.Interface,
applyUpdatesToCredentialIssuerConfigFunc func(configToUpdate *configv1alpha1.CredentialIssuerConfig),
) error {
credentialIssuerConfigsClient := pinnipedClient.ConfigV1alpha1().CredentialIssuerConfigs(credentialIssuerConfigNamespace)
if notFound {
// Create it
credentialIssuerConfig := minimalValidCredentialIssuerConfig(credentialIssuerConfigName, credentialIssuerConfigNamespace)
applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
if _, err := credentialIssuerConfigsClient.Create(ctx, credentialIssuerConfig, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("create failed: %w", err)
}
} else {
// Already exists, so check to see if we need to update it
credentialIssuerConfig := existingCredentialIssuerConfig.DeepCopy()
applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
if equality.Semantic.DeepEqual(existingCredentialIssuerConfig, credentialIssuerConfig) {
// Nothing interesting would change as a result of this update, so skip it
return nil
}
if _, err := credentialIssuerConfigsClient.Update(ctx, credentialIssuerConfig, metav1.UpdateOptions{}); err != nil {
return err
}
}
return nil
}
func minimalValidCredentialIssuerConfig(
credentialIssuerConfigName string,
credentialIssuerConfigNamespace string,

View File

@ -1,5 +1,5 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package discovery contains controller(s) for reconciling CredentialIssuerConfig's.
// Package issuerconfig contains controller(s) for reconciling CredentialIssuerConfigs.
package issuerconfig

View File

@ -14,7 +14,6 @@ import (
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
configv1alpha1informers "go.pinniped.dev/generated/1.19/client/informers/externalversions/config/v1alpha1"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
)
@ -25,33 +24,34 @@ const (
clusterInfoConfigMapKey = "kubeconfig"
)
type publisherController struct {
namespace string
credentialIssuerConfigResourceName string
serverOverride *string
pinnipedClient pinnipedclientset.Interface
configMapInformer corev1informers.ConfigMapInformer
credentialIssuerConfigInformer configv1alpha1informers.CredentialIssuerConfigInformer
type kubeConfigInfoPublisherController struct {
credentialIssuerConfigNamespaceName string
credentialIssuerConfigResourceName string
serverOverride *string
pinnipedClient pinnipedclientset.Interface
configMapInformer corev1informers.ConfigMapInformer
}
func NewPublisherController(namespace string,
// NewKubeConfigInfoPublisherController returns a controller that syncs the
// configv1alpha1.CredentialIssuerConfig.Status.KubeConfigInfo field with the cluster-info ConfigMap
// in the kube-public namespace.
func NewKubeConfigInfoPublisherController(
credentialIssuerConfigNamespaceName string,
credentialIssuerConfigResourceName string,
serverOverride *string,
pinnipedClient pinnipedclientset.Interface,
configMapInformer corev1informers.ConfigMapInformer,
credentialIssuerConfigInformer configv1alpha1informers.CredentialIssuerConfigInformer,
withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
return controllerlib.New(
controllerlib.Config{
Name: "publisher-controller",
Syncer: &publisherController{
credentialIssuerConfigResourceName: credentialIssuerConfigResourceName,
namespace: namespace,
serverOverride: serverOverride,
pinnipedClient: pinnipedClient,
configMapInformer: configMapInformer,
credentialIssuerConfigInformer: credentialIssuerConfigInformer,
Syncer: &kubeConfigInfoPublisherController{
credentialIssuerConfigResourceName: credentialIssuerConfigResourceName,
credentialIssuerConfigNamespaceName: credentialIssuerConfigNamespaceName,
serverOverride: serverOverride,
pinnipedClient: pinnipedClient,
configMapInformer: configMapInformer,
},
},
withInformer(
@ -59,15 +59,10 @@ func NewPublisherController(namespace string,
pinnipedcontroller.NameAndNamespaceExactMatchFilterFactory(clusterInfoName, ClusterInfoNamespace),
controllerlib.InformerOption{},
),
withInformer(
credentialIssuerConfigInformer,
pinnipedcontroller.NameAndNamespaceExactMatchFilterFactory(credentialIssuerConfigResourceName, namespace),
controllerlib.InformerOption{},
),
)
}
func (c *publisherController) Sync(ctx controllerlib.Context) error {
func (c *kubeConfigInfoPublisherController) Sync(ctx controllerlib.Context) error {
configMap, err := c.configMapInformer.
Lister().
ConfigMaps(ClusterInfoNamespace).
@ -108,15 +103,6 @@ func (c *publisherController) Sync(ctx controllerlib.Context) error {
server = *c.serverOverride
}
existingCredentialIssuerConfigFromInformerCache, err := c.credentialIssuerConfigInformer.
Lister().
CredentialIssuerConfigs(c.namespace).
Get(c.credentialIssuerConfigResourceName)
notFound = k8serrors.IsNotFound(err)
if err != nil && !notFound {
return fmt.Errorf("could not get credentialissuerconfig: %w", err)
}
updateServerAndCAFunc := func(c *configv1alpha1.CredentialIssuerConfig) {
c.Status.KubeConfigInfo = &configv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: server,
@ -124,17 +110,11 @@ func (c *publisherController) Sync(ctx controllerlib.Context) error {
}
}
err = createOrUpdateCredentialIssuerConfig(
return CreateOrUpdateCredentialIssuerConfig(
ctx.Context,
existingCredentialIssuerConfigFromInformerCache,
notFound,
c.credentialIssuerConfigNamespaceName,
c.credentialIssuerConfigResourceName,
c.namespace,
c.pinnipedClient,
updateServerAndCAFunc)
if err != nil {
return fmt.Errorf("could not create or update credentialissuerconfig: %w", err)
}
return nil
updateServerAndCAFunc,
)
}

View File

@ -22,7 +22,6 @@ import (
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
pinnipedfake "go.pinniped.dev/generated/1.19/client/clientset/versioned/fake"
pinnipedinformers "go.pinniped.dev/generated/1.19/client/informers/externalversions"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/testutil"
@ -36,24 +35,20 @@ func TestInformerFilters(t *testing.T) {
var r *require.Assertions
var observableWithInformerOption *testutil.ObservableWithInformerOption
var configMapInformerFilter controllerlib.Filter
var credentialIssuerConfigInformerFilter controllerlib.Filter
it.Before(func() {
r = require.New(t)
observableWithInformerOption = testutil.NewObservableWithInformerOption()
configMapInformer := kubeinformers.NewSharedInformerFactory(nil, 0).Core().V1().ConfigMaps()
credentialIssuerConfigInformer := pinnipedinformers.NewSharedInformerFactory(nil, 0).Config().V1alpha1().CredentialIssuerConfigs()
_ = NewPublisherController(
_ = NewKubeConfigInfoPublisherController(
installedInNamespace,
credentialIssuerConfigResourceName,
nil,
nil,
configMapInformer,
credentialIssuerConfigInformer,
observableWithInformerOption.WithInformer, // make it possible to observe the behavior of the Filters
)
configMapInformerFilter = observableWithInformerOption.GetFilterForInformer(configMapInformer)
credentialIssuerConfigInformerFilter = observableWithInformerOption.GetFilterForInformer(credentialIssuerConfigInformer)
})
when("watching ConfigMap objects", func() {
@ -103,62 +98,6 @@ func TestInformerFilters(t *testing.T) {
})
})
})
when("watching CredentialIssuerConfig objects", func() {
var subject controllerlib.Filter
var target, wrongNamespace, wrongName, unrelated *configv1alpha1.CredentialIssuerConfig
it.Before(func() {
subject = credentialIssuerConfigInformerFilter
target = &configv1alpha1.CredentialIssuerConfig{
ObjectMeta: metav1.ObjectMeta{Name: credentialIssuerConfigResourceName, Namespace: installedInNamespace},
}
wrongNamespace = &configv1alpha1.CredentialIssuerConfig{
ObjectMeta: metav1.ObjectMeta{Name: credentialIssuerConfigResourceName, Namespace: "wrong-namespace"},
}
wrongName = &configv1alpha1.CredentialIssuerConfig{
ObjectMeta: metav1.ObjectMeta{Name: "wrong-name", Namespace: installedInNamespace},
}
unrelated = &configv1alpha1.CredentialIssuerConfig{
ObjectMeta: metav1.ObjectMeta{Name: "wrong-name", Namespace: "wrong-namespace"},
}
})
when("the target CredentialIssuerConfig changes", func() {
it("returns true to trigger the sync method", func() {
r.True(subject.Add(target))
r.True(subject.Update(target, unrelated))
r.True(subject.Update(unrelated, target))
r.True(subject.Delete(target))
})
})
when("a CredentialIssuerConfig from another namespace changes", func() {
it("returns false to avoid triggering the sync method", func() {
r.False(subject.Add(wrongNamespace))
r.False(subject.Update(wrongNamespace, unrelated))
r.False(subject.Update(unrelated, wrongNamespace))
r.False(subject.Delete(wrongNamespace))
})
})
when("a CredentialIssuerConfig with a different name changes", func() {
it("returns false to avoid triggering the sync method", func() {
r.False(subject.Add(wrongName))
r.False(subject.Update(wrongName, unrelated))
r.False(subject.Update(unrelated, wrongName))
r.False(subject.Delete(wrongName))
})
})
when("a CredentialIssuerConfig with a different name and a different namespace changes", func() {
it("returns false to avoid triggering the sync method", func() {
r.False(subject.Add(unrelated))
r.False(subject.Update(unrelated, unrelated))
r.False(subject.Delete(unrelated))
})
})
})
}, spec.Parallel(), spec.Report(report.Terminal{}))
}
@ -172,9 +111,7 @@ func TestSync(t *testing.T) {
var subject controllerlib.Controller
var serverOverride *string
var kubeInformerClient *kubernetesfake.Clientset
var pinnipedInformerClient *pinnipedfake.Clientset
var kubeInformers kubeinformers.SharedInformerFactory
var pinnipedInformers pinnipedinformers.SharedInformerFactory
var pinnipedAPIClient *pinnipedfake.Clientset
var timeoutContext context.Context
var timeoutContextCancel context.CancelFunc
@ -206,13 +143,12 @@ func TestSync(t *testing.T) {
// nested Before's can keep adding things to the informer caches.
var startInformersAndController = func() {
// Set this at the last second to allow for injection of server override.
subject = NewPublisherController(
subject = NewKubeConfigInfoPublisherController(
installedInNamespace,
credentialIssuerConfigResourceName,
serverOverride,
pinnipedAPIClient,
kubeInformers.Core().V1().ConfigMaps(),
pinnipedInformers.Config().V1alpha1().CredentialIssuerConfigs(),
controllerlib.WithInformer,
)
@ -228,7 +164,6 @@ func TestSync(t *testing.T) {
// Must start informers before calling TestRunSynchronously()
kubeInformers.Start(timeoutContext.Done())
pinnipedInformers.Start(timeoutContext.Done())
controllerlib.TestRunSynchronously(t, subject)
}
@ -240,8 +175,6 @@ func TestSync(t *testing.T) {
kubeInformerClient = kubernetesfake.NewSimpleClientset()
kubeInformers = kubeinformers.NewSharedInformerFactory(kubeInformerClient, 0)
pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
pinnipedInformerClient = pinnipedfake.NewSimpleClientset()
pinnipedInformers = pinnipedinformers.NewSharedInformerFactory(pinnipedInformerClient, 0)
})
it.After(func() {
@ -288,6 +221,7 @@ func TestSync(t *testing.T) {
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(expectedCredentialIssuerConfigGVR, installedInNamespace, expectedCredentialIssuerConfig.Name),
coretesting.NewCreateAction(
expectedCredentialIssuerConfigGVR,
installedInNamespace,
@ -334,6 +268,7 @@ func TestSync(t *testing.T) {
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(expectedCredentialIssuerConfigGVR, installedInNamespace, expectedCredentialIssuerConfig.Name),
coretesting.NewCreateAction(
expectedCredentialIssuerConfigGVR,
installedInNamespace,
@ -348,13 +283,16 @@ func TestSync(t *testing.T) {
when("the CredentialIssuerConfig already exists", func() {
when("the CredentialIssuerConfig is already up to date according to the data in the ConfigMap", func() {
var credentialIssuerConfigGVR schema.GroupVersionResource
var credentialIssuerConfig *configv1alpha1.CredentialIssuerConfig
it.Before(func() {
_, expectedCredentialIssuerConfig := expectedCredentialIssuerConfig(
credentialIssuerConfigGVR, credentialIssuerConfig = expectedCredentialIssuerConfig(
installedInNamespace,
kubeServerURL,
caData,
)
err := pinnipedInformerClient.Tracker().Add(expectedCredentialIssuerConfig)
err := pinnipedAPIClient.Tracker().Add(credentialIssuerConfig)
r.NoError(err)
})
@ -363,7 +301,12 @@ func TestSync(t *testing.T) {
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Empty(pinnipedAPIClient.Actions())
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(credentialIssuerConfigGVR, installedInNamespace, credentialIssuerConfig.Name),
},
pinnipedAPIClient.Actions(),
)
})
})
@ -375,7 +318,6 @@ func TestSync(t *testing.T) {
caData,
)
expectedCredentialIssuerConfig.Status.KubeConfigInfo.Server = "https://some-other-server"
r.NoError(pinnipedInformerClient.Tracker().Add(expectedCredentialIssuerConfig))
r.NoError(pinnipedAPIClient.Tracker().Add(expectedCredentialIssuerConfig))
})
@ -390,6 +332,7 @@ func TestSync(t *testing.T) {
caData,
)
expectedActions := []coretesting.Action{
coretesting.NewGetAction(expectedCredentialIssuerConfigGVR, installedInNamespace, expectedCredentialIssuerConfig.Name),
coretesting.NewUpdateAction(
expectedCredentialIssuerConfigGVR,
installedInNamespace,

View File

@ -0,0 +1,216 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"context"
"fmt"
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
)
// These constants are the default values for the kube-controller-manager flags. If the flags are
// not properly set on the kube-controller-manager process, then we will fallback to using these.
const (
k8sAPIServerCACertPEMDefaultPath = "/etc/kubernetes/ca/ca.pem"
k8sAPIServerCAKeyPEMDefaultPath = "/etc/kubernetes/ca/ca.key"
)
type annotaterController struct {
agentPodConfig *AgentPodConfig
credentialIssuerConfigLocationConfig *CredentialIssuerConfigLocationConfig
clock clock.Clock
k8sClient kubernetes.Interface
pinnipedAPIClient pinnipedclientset.Interface
kubeSystemPodInformer corev1informers.PodInformer
agentPodInformer corev1informers.PodInformer
}
// NewAnnotaterController returns a controller that updates agent pods with the path to the kube
// API's certificate and key.
//
// This controller will add annotations to agent pods with the best-guess paths to the kube API's
// certificate and key.
//
// It also is tasked with updating the CredentialIssuerConfig, located via the provided
// credentialIssuerConfigLocationConfig, with any errors that it encounters.
func NewAnnotaterController(
agentPodConfig *AgentPodConfig,
credentialIssuerConfigLocationConfig *CredentialIssuerConfigLocationConfig,
clock clock.Clock,
k8sClient kubernetes.Interface,
pinnipedAPIClient pinnipedclientset.Interface,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
return controllerlib.New(
controllerlib.Config{
Name: "kube-cert-agent-annotater-controller",
Syncer: &annotaterController{
agentPodConfig: agentPodConfig,
credentialIssuerConfigLocationConfig: credentialIssuerConfigLocationConfig,
clock: clock,
k8sClient: k8sClient,
pinnipedAPIClient: pinnipedAPIClient,
kubeSystemPodInformer: kubeSystemPodInformer,
agentPodInformer: agentPodInformer,
},
},
withInformer(
kubeSystemPodInformer,
pinnipedcontroller.SimpleFilter(isControllerManagerPod),
controllerlib.InformerOption{},
),
withInformer(
agentPodInformer,
pinnipedcontroller.SimpleFilter(isAgentPod),
controllerlib.InformerOption{},
),
)
}
// Sync implements controllerlib.Syncer.
func (c *annotaterController) Sync(ctx controllerlib.Context) error {
agentSelector := labels.SelectorFromSet(c.agentPodConfig.Labels())
agentPods, err := c.agentPodInformer.
Lister().
Pods(c.agentPodConfig.Namespace).
List(agentSelector)
if err != nil {
return fmt.Errorf("informer cannot list agent pods: %w", err)
}
for _, agentPod := range agentPods {
controllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(agentPod, c.kubeSystemPodInformer)
if err != nil {
return err
}
if controllerManagerPod == nil {
// The deleter will clean this orphaned agent.
continue
}
certPath := getContainerArgByName(
controllerManagerPod,
"cluster-signing-cert-file",
k8sAPIServerCACertPEMDefaultPath,
)
keyPath := getContainerArgByName(
controllerManagerPod,
"cluster-signing-key-file",
k8sAPIServerCAKeyPEMDefaultPath,
)
if err := c.maybeUpdateAgentPod(
ctx.Context,
agentPod.Name,
agentPod.Namespace,
certPath,
keyPath,
); err != nil {
err = fmt.Errorf("cannot update agent pod: %w", err)
strategyResultUpdateErr := createOrUpdateCredentialIssuerConfig(
ctx.Context,
*c.credentialIssuerConfigLocationConfig,
c.clock,
c.pinnipedAPIClient,
err,
)
if strategyResultUpdateErr != nil {
// If the CIC update fails, then we probably want to try again. This controller will get
// called again because of the pod create failure, so just try the CIC update again then.
klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuerConfig")
}
return err
}
}
return nil
}
func (c *annotaterController) maybeUpdateAgentPod(
ctx context.Context,
name string,
namespace string,
certPath string,
keyPath string,
) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
agentPod, err := c.k8sClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return err
}
if agentPod.Annotations[agentPodCertPathAnnotationKey] != certPath ||
agentPod.Annotations[agentPodKeyPathAnnotationKey] != keyPath {
if err := c.reallyUpdateAgentPod(
ctx,
agentPod,
certPath,
keyPath,
); err != nil {
return err
}
}
return nil
})
}
func (c *annotaterController) reallyUpdateAgentPod(
ctx context.Context,
agentPod *corev1.Pod,
certPath string,
keyPath string,
) error {
// Create a deep copy of the agent pod since it is coming straight from the cache.
updatedAgentPod := agentPod.DeepCopy()
if updatedAgentPod.Annotations == nil {
updatedAgentPod.Annotations = make(map[string]string)
}
updatedAgentPod.Annotations[agentPodCertPathAnnotationKey] = certPath
updatedAgentPod.Annotations[agentPodKeyPathAnnotationKey] = keyPath
klog.InfoS(
"updating agent pod annotations",
"pod",
klog.KObj(updatedAgentPod),
"certPath",
certPath,
"keyPath",
keyPath,
)
_, err := c.k8sClient.
CoreV1().
Pods(agentPod.Namespace).
Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
return err
}
func getContainerArgByName(pod *corev1.Pod, name, fallbackValue string) string {
for _, container := range pod.Spec.Containers {
flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
var val string
flagset.StringVar(&val, name, "", "")
_ = flagset.Parse(append(container.Command, container.Args...))
if val != "" {
return val
}
}
return fallbackValue
}
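For reference, the flag handling above (getContainerArgByName together with the default-path constants) is what lets the annotater cope with the kube-controller-manager flags being written either as "--flag=value" or as "--flag value", and is what makes it fall back to the conventional default paths when the flag is missing or unparsable. A self-contained sketch of just that parsing behavior, using hypothetical sample command lines rather than anything from this change, might look like this:
// Standalone illustration of the pflag-based flag extraction used by
// getContainerArgByName above. The sample command lines are hypothetical;
// only the parsing behavior is meant to mirror the controller.
package main
import (
"fmt"
"github.com/spf13/pflag"
)
// argValueOrDefault scans a container command line for a single --name flag,
// tolerating unknown flags, and returns fallbackValue when the flag is
// missing or unparsable.
func argValueOrDefault(args []string, name, fallbackValue string) string {
flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
var val string
flagset.StringVar(&val, name, "", "")
_ = flagset.Parse(args)
if val != "" {
return val
}
return fallbackValue
}
func main() {
// "--flag=value" form, as typically rendered in a static pod manifest.
fmt.Println(argValueOrDefault(
[]string{"kube-controller-manager", "--cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt"},
"cluster-signing-cert-file",
"/etc/kubernetes/ca/ca.pem",
)) // prints /etc/kubernetes/pki/ca.crt
// "--flag value" form is handled the same way by pflag.
fmt.Println(argValueOrDefault(
[]string{"kube-controller-manager", "--cluster-signing-key-file", "/etc/kubernetes/pki/ca.key"},
"cluster-signing-key-file",
"/etc/kubernetes/ca/ca.key",
)) // prints /etc/kubernetes/pki/ca.key
// Flag absent: the documented default path is returned instead.
fmt.Println(argValueOrDefault(
[]string{"kube-controller-manager"},
"cluster-signing-cert-file",
"/etc/kubernetes/ca/ca.pem",
)) // prints /etc/kubernetes/ca/ca.pem
}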

View File

@ -0,0 +1,672 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"context"
"errors"
"testing"
"time"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
kubernetesfake "k8s.io/client-go/kubernetes/fake"
coretesting "k8s.io/client-go/testing"
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
pinnipedfake "go.pinniped.dev/generated/1.19/client/clientset/versioned/fake"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/testutil"
)
func TestAnnotaterControllerFilter(t *testing.T) {
defineSharedKubecertagentFilterSpecs(
t,
"AnnotaterControllerFilter",
func(
agentPodConfig *AgentPodConfig,
_ *CredentialIssuerConfigLocationConfig,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
observableWithInformerOption *testutil.ObservableWithInformerOption,
) {
_ = NewAnnotaterController(
agentPodConfig,
nil, // credentialIssuerConfigLocationConfig, shouldn't matter
nil, // clock, shouldn't matter
nil, // k8sClient, shouldn't matter
nil, // pinnipedClient, shouldn't matter
kubeSystemPodInformer,
agentPodInformer,
observableWithInformerOption.WithInformer,
)
},
)
}
func TestAnnotaterControllerSync(t *testing.T) {
spec.Run(t, "AnnotaterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
const kubeSystemNamespace = "kube-system"
const agentPodNamespace = "agent-pod-namespace"
const defaultKubeControllerManagerClusterSigningCertFileFlagValue = "/etc/kubernetes/ca/ca.pem"
const defaultKubeControllerManagerClusterSigningKeyFileFlagValue = "/etc/kubernetes/ca/ca.key"
const credentialIssuerConfigNamespaceName = "cic-namespace-name"
const credentialIssuerConfigResourceName = "cic-resource-name"
const (
certPath = "some-cert-path"
certPathAnnotation = "kube-cert-agent.pinniped.dev/cert-path"
keyPath = "some-key-path"
keyPathAnnotation = "kube-cert-agent.pinniped.dev/key-path"
)
var r *require.Assertions
var subject controllerlib.Controller
var kubeAPIClient *kubernetesfake.Clientset
var kubeSystemInformerClient *kubernetesfake.Clientset
var kubeSystemInformers kubeinformers.SharedInformerFactory
var agentInformerClient *kubernetesfake.Clientset
var agentInformers kubeinformers.SharedInformerFactory
var pinnipedAPIClient *pinnipedfake.Clientset
var timeoutContext context.Context
var timeoutContextCancel context.CancelFunc
var syncContext *controllerlib.Context
var controllerManagerPod, agentPod *corev1.Pod
var podsGVR schema.GroupVersionResource
var credentialIssuerConfigGVR schema.GroupVersionResource
var frozenNow time.Time
// Defer starting the informers until the last possible moment so that the
// nested Before's can keep adding things to the informer caches.
var startInformersAndController = func() {
// Set this at the last second to allow for injection of server override.
subject = NewAnnotaterController(
&AgentPodConfig{
Namespace: agentPodNamespace,
ContainerImage: "some-agent-image",
PodNamePrefix: "some-agent-name-",
},
&CredentialIssuerConfigLocationConfig{
Namespace: credentialIssuerConfigNamespaceName,
Name: credentialIssuerConfigResourceName,
},
clock.NewFakeClock(frozenNow),
kubeAPIClient,
pinnipedAPIClient,
kubeSystemInformers.Core().V1().Pods(),
agentInformers.Core().V1().Pods(),
controllerlib.WithInformer,
)
// Set this at the last second to support calling subject.Name().
syncContext = &controllerlib.Context{
Context: timeoutContext,
Name: subject.Name(),
Key: controllerlib.Key{
Namespace: kubeSystemNamespace,
Name: "should-not-matter",
},
}
// Must start informers before calling TestRunSynchronously()
kubeSystemInformers.Start(timeoutContext.Done())
agentInformers.Start(timeoutContext.Done())
controllerlib.TestRunSynchronously(t, subject)
}
it.Before(func() {
r = require.New(t)
kubeAPIClient = kubernetesfake.NewSimpleClientset()
kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
agentInformerClient = kubernetesfake.NewSimpleClientset()
agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
timeoutContext, timeoutContextCancel = context.WithTimeout(context.Background(), time.Second*3)
controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
kubeSystemNamespace, agentPodNamespace, certPath, keyPath,
)
podsGVR = schema.GroupVersionResource{
Group: corev1.SchemeGroupVersion.Group,
Version: corev1.SchemeGroupVersion.Version,
Resource: "pods",
}
credentialIssuerConfigGVR = schema.GroupVersionResource{
Group: configv1alpha1.GroupName,
Version: configv1alpha1.SchemeGroupVersion.Version,
Resource: "credentialissuerconfigs",
}
frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
// Add a pod into the test that doesn't matter to make sure we don't accidentally trigger any
// logic on this thing.
ignorablePod := corev1.Pod{}
ignorablePod.Name = "some-ignorable-pod"
r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
r.NoError(agentInformerClient.Tracker().Add(&ignorablePod))
r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
})
it.After(func() {
timeoutContextCancel()
})
when("there is an agent pod without annotations set", func() {
it.Before(func() {
r.NoError(agentInformerClient.Tracker().Add(agentPod))
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
})
when("there is a matching controller manager pod", func() {
it.Before(func() {
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("updates the annotations according to the controller manager pod", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Annotations[certPathAnnotation] = certPath
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(
podsGVR,
agentPodNamespace,
updatedAgentPod.Name,
),
coretesting.NewUpdateAction(
podsGVR,
agentPodNamespace,
updatedAgentPod,
),
},
kubeAPIClient.Actions(),
)
})
when("updating the agent pod fails", func() {
it.Before(func() {
kubeAPIClient.PrependReactor(
"update",
"pods",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some update error")
},
)
})
it("returns the error", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.EqualError(err, "cannot update agent pod: some update error")
})
when("there is already a CredentialIssuerConfig", func() {
var initialCredentialIssuerConfig *configv1alpha1.CredentialIssuerConfig
it.Before(func() {
initialCredentialIssuerConfig = &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{},
KubeConfigInfo: &configv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: "some-server",
CertificateAuthorityData: "some-ca-value",
},
},
}
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuerConfig))
})
it("updates the CredentialIssuerConfig status with the error", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
expectedCredentialIssuerConfig := initialCredentialIssuerConfig.DeepCopy()
expectedCredentialIssuerConfig.Status.Strategies = []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "cannot update agent pod: some update error",
LastUpdateTime: metav1.NewTime(frozenNow),
},
}
expectedGetAction := coretesting.NewGetAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
credentialIssuerConfigResourceName,
)
expectedUpdateAction := coretesting.NewUpdateAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
expectedCredentialIssuerConfig,
)
r.EqualError(err, "cannot update agent pod: some update error")
r.Equal(
[]coretesting.Action{
expectedGetAction,
expectedUpdateAction,
},
pinnipedAPIClient.Actions(),
)
})
when("updating the CredentialIssuerConfig fails", func() {
it.Before(func() {
pinnipedAPIClient.PrependReactor(
"update",
"credentialissuerconfigs",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some update error")
},
)
})
it("returns the original pod update error so the controller gets scheduled again", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.EqualError(err, "cannot update agent pod: some update error")
})
})
})
when("there is not already a CredentialIssuerConfig", func() {
it("creates the CredentialIssuerConfig status with the error", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
expectedCredentialIssuerConfig := &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "cannot update agent pod: some update error",
LastUpdateTime: metav1.NewTime(frozenNow),
},
},
},
}
expectedGetAction := coretesting.NewGetAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
credentialIssuerConfigResourceName,
)
expectedCreateAction := coretesting.NewCreateAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
expectedCredentialIssuerConfig,
)
r.EqualError(err, "cannot update agent pod: some update error")
r.Equal(
[]coretesting.Action{
expectedGetAction,
expectedCreateAction,
},
pinnipedAPIClient.Actions(),
)
})
})
})
})
when("there is a controller manager pod with CLI flag values separated by spaces", func() {
it.Before(func() {
controllerManagerPod.Spec.Containers[0].Command = []string{
"kube-controller-manager",
"--cluster-signing-cert-file", certPath,
"--cluster-signing-key-file", keyPath,
}
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("updates the annotations according to the controller manager pod", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Annotations[certPathAnnotation] = certPath
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(
podsGVR,
agentPodNamespace,
updatedAgentPod.Name,
),
coretesting.NewUpdateAction(
podsGVR,
agentPodNamespace,
updatedAgentPod,
),
},
kubeAPIClient.Actions(),
)
})
})
when("there is a controller manager pod with no CLI flags", func() {
it.Before(func() {
controllerManagerPod.Spec.Containers[0].Command = []string{
"kube-controller-manager",
}
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("updates the annotations with the default values", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(
podsGVR,
agentPodNamespace,
updatedAgentPod.Name,
),
coretesting.NewUpdateAction(
podsGVR,
agentPodNamespace,
updatedAgentPod,
),
},
kubeAPIClient.Actions(),
)
})
})
when("there is a controller manager pod with unparsable CLI flags", func() {
it.Before(func() {
controllerManagerPod.Spec.Containers[0].Command = []string{
"kube-controller-manager",
"--cluster-signing-cert-file-blah", certPath,
"--cluster-signing-key-file-blah", keyPath,
}
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("updates the annotations with the default values", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(
podsGVR,
agentPodNamespace,
updatedAgentPod.Name,
),
coretesting.NewUpdateAction(
podsGVR,
agentPodNamespace,
updatedAgentPod,
),
},
kubeAPIClient.Actions(),
)
})
})
when("there is a controller manager pod with unparsable cert CLI flag", func() {
it.Before(func() {
controllerManagerPod.Spec.Containers[0].Command = []string{
"kube-controller-manager",
"--cluster-signing-cert-file-blah", certPath,
"--cluster-signing-key-file", keyPath,
}
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("updates the key annotation with the default cert flag value", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(
podsGVR,
agentPodNamespace,
updatedAgentPod.Name,
),
coretesting.NewUpdateAction(
podsGVR,
agentPodNamespace,
updatedAgentPod,
),
},
kubeAPIClient.Actions(),
)
})
})
when("there is a controller manager pod with unparsable key CLI flag", func() {
it.Before(func() {
controllerManagerPod.Spec.Containers[0].Command = []string{
"kube-controller-manager",
"--cluster-signing-cert-file", certPath,
"--cluster-signing-key-file-blah", keyPath,
}
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("updates the cert annotation with the default key flag value", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Annotations[certPathAnnotation] = certPath
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(
podsGVR,
agentPodNamespace,
updatedAgentPod.Name,
),
coretesting.NewUpdateAction(
podsGVR,
agentPodNamespace,
updatedAgentPod,
),
},
kubeAPIClient.Actions(),
)
})
})
when("there is a non-matching controller manager pod via uid", func() {
it.Before(func() {
controllerManagerPod.UID = "some-other-controller-manager-uid"
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("does nothing; the deleter will delete this pod to trigger resync", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
r.Equal(
[]coretesting.Action{},
kubeAPIClient.Actions(),
)
})
})
when("there is a non-matching controller manager pod via name", func() {
it.Before(func() {
controllerManagerPod.Name = "some-other-controller-manager-name"
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("does nothing; the deleter will delete this pod to trigger resync", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
r.Equal(
[]coretesting.Action{},
kubeAPIClient.Actions(),
)
})
})
})
when("there is an agent pod with correct annotations set", func() {
it.Before(func() {
agentPod.Annotations = make(map[string]string)
agentPod.Annotations[certPathAnnotation] = certPath
agentPod.Annotations[keyPathAnnotation] = keyPath
r.NoError(agentInformerClient.Tracker().Add(agentPod))
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
})
when("there is a matching controller manager pod", func() {
it.Before(func() {
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("does nothing since the pod is up to date", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
r.Equal(
[]coretesting.Action{},
kubeAPIClient.Actions(),
)
})
})
})
when("there is an agent pod with the wrong cert annotation", func() {
it.Before(func() {
agentPod.Annotations[certPathAnnotation] = "wrong"
agentPod.Annotations[keyPathAnnotation] = keyPath
r.NoError(agentInformerClient.Tracker().Add(agentPod))
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
})
when("there is a matching controller manager pod", func() {
it.Before(func() {
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("updates the agent with the correct cert annotation", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Annotations[certPathAnnotation] = certPath
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(
podsGVR,
agentPodNamespace,
updatedAgentPod.Name,
),
coretesting.NewUpdateAction(
podsGVR,
agentPodNamespace,
updatedAgentPod,
),
},
kubeAPIClient.Actions(),
)
})
})
})
when("there is an agent pod with the wrong key annotation", func() {
it.Before(func() {
agentPod.Annotations[certPathAnnotation] = certPath
agentPod.Annotations[keyPathAnnotation] = "key"
r.NoError(agentInformerClient.Tracker().Add(agentPod))
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
})
when("there is a matching controller manager pod", func() {
it.Before(func() {
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("updates the agent with the correct key annotation", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
r.Equal(
[]coretesting.Action{
coretesting.NewGetAction(
podsGVR,
agentPodNamespace,
updatedAgentPod.Name,
),
coretesting.NewUpdateAction(
podsGVR,
agentPodNamespace,
updatedAgentPod,
),
},
kubeAPIClient.Actions(),
)
})
})
})
}, spec.Parallel(), spec.Report(report.Terminal{}))
}

View File

@ -0,0 +1,176 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
"go.pinniped.dev/internal/constable"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
)
type createrController struct {
agentPodConfig *AgentPodConfig
credentialIssuerConfigLocationConfig *CredentialIssuerConfigLocationConfig
clock clock.Clock
k8sClient kubernetes.Interface
pinnipedAPIClient pinnipedclientset.Interface
kubeSystemPodInformer corev1informers.PodInformer
agentPodInformer corev1informers.PodInformer
}
// NewCreaterController returns a controller that creates new kube-cert-agent pods for every known
// kube-controller-manager pod.
//
// It also is tasked with updating the CredentialIssuerConfig, located via the provided
// credentialIssuerConfigLocationConfig, with any errors that it encounters.
func NewCreaterController(
agentPodConfig *AgentPodConfig,
credentialIssuerConfigLocationConfig *CredentialIssuerConfigLocationConfig,
clock clock.Clock,
k8sClient kubernetes.Interface,
pinnipedAPIClient pinnipedclientset.Interface,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
return controllerlib.New(
controllerlib.Config{
//nolint: misspell
Name: "kube-cert-agent-creater-controller",
Syncer: &createrController{
agentPodConfig: agentPodConfig,
credentialIssuerConfigLocationConfig: credentialIssuerConfigLocationConfig,
clock: clock,
k8sClient: k8sClient,
pinnipedAPIClient: pinnipedAPIClient,
kubeSystemPodInformer: kubeSystemPodInformer,
agentPodInformer: agentPodInformer,
},
},
withInformer(
kubeSystemPodInformer,
pinnipedcontroller.SimpleFilter(isControllerManagerPod),
controllerlib.InformerOption{},
),
withInformer(
agentPodInformer,
pinnipedcontroller.SimpleFilter(isAgentPod),
controllerlib.InformerOption{},
),
)
}
// Sync implements controllerlib.Syncer.
func (c *createrController) Sync(ctx controllerlib.Context) error {
controllerManagerSelector, err := labels.Parse("component=kube-controller-manager")
if err != nil {
return fmt.Errorf("cannot create controller manager selector: %w", err)
}
controllerManagerPods, err := c.kubeSystemPodInformer.Lister().List(controllerManagerSelector)
if err != nil {
return fmt.Errorf("informer cannot list controller manager pods: %w", err)
}
if len(controllerManagerPods) == 0 {
// If there are no controller manager pods, we alert the user that we can't find the keypair via
// the CredentialIssuerConfig.
return createOrUpdateCredentialIssuerConfig(
ctx.Context,
*c.credentialIssuerConfigLocationConfig,
c.clock,
c.pinnipedAPIClient,
constable.Error("Controller manager pod(s) could not be found"),
)
}
for _, controllerManagerPod := range controllerManagerPods {
agentPod, err := findAgentPodForSpecificControllerManagerPod(
controllerManagerPod,
c.kubeSystemPodInformer,
c.agentPodInformer,
c.agentPodConfig.Labels(),
)
if err != nil {
return err
}
if agentPod == nil {
agentPod = newAgentPod(controllerManagerPod, c.agentPodConfig.PodTemplate())
klog.InfoS(
"creating agent pod",
"pod",
klog.KObj(agentPod),
"controller",
klog.KObj(controllerManagerPod),
)
_, err := c.k8sClient.CoreV1().
Pods(c.agentPodConfig.Namespace).
Create(ctx.Context, agentPod, metav1.CreateOptions{})
if err != nil {
err = fmt.Errorf("cannot create agent pod: %w", err)
strategyResultUpdateErr := createOrUpdateCredentialIssuerConfig(
ctx.Context,
*c.credentialIssuerConfigLocationConfig,
c.clock,
c.pinnipedAPIClient,
err,
)
if strategyResultUpdateErr != nil {
// If the CIC update fails, then we probably want to try again. This controller will get
// called again because of the pod create failure, so just try the CIC update again then.
klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuerConfig")
}
return err
}
}
// The deleter controller handles the case where the expected fields do not match in the agent pod.
}
return nil
}
func findAgentPodForSpecificControllerManagerPod(
controllerManagerPod *corev1.Pod,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
agentLabels map[string]string,
) (*corev1.Pod, error) {
agentSelector := labels.SelectorFromSet(agentLabels)
agentPods, err := agentPodInformer.
Lister().
List(agentSelector)
if err != nil {
return nil, fmt.Errorf("informer cannot list agent pods: %w", err)
}
for _, maybeAgentPod := range agentPods {
maybeControllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(
maybeAgentPod,
kubeSystemPodInformer,
)
if err != nil {
return nil, err
}
if maybeControllerManagerPod != nil &&
maybeControllerManagerPod.UID == controllerManagerPod.UID {
return maybeAgentPod, nil
}
}
return nil, nil
}
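Both the creater and annotater controllers report failures through createOrUpdateCredentialIssuerConfig, which is not included in this diff. The tests in this change expect a Get on the CredentialIssuerConfig followed by either a Create (errors wrapped as "create failed: ...") or an Update, all under the "could not create or update credentialissuerconfig: " prefix. A rough sketch of a helper with that shape is below; it assumes the generated clientset exposes ConfigV1alpha1(), and the cicLocation type stands in for CredentialIssuerConfigLocationConfig purely to keep the sketch self-contained.
// A minimal sketch of a createOrUpdateCredentialIssuerConfig helper along the
// lines implied by the controllers and tests in this change (Get, then Create
// when the object is missing, otherwise Update). The real helper is not part
// of this diff, so treat the exact status handling here as an assumption.
package kubecertagentsketch
import (
"context"
"fmt"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
)
// cicLocation is a stand-in for CredentialIssuerConfigLocationConfig.
type cicLocation struct{ Namespace, Name string }
func createOrUpdateCredentialIssuerConfig(
ctx context.Context,
loc cicLocation,
clk clock.Clock,
client pinnipedclientset.Interface,
strategyErr error,
) error {
strategy := configv1alpha1.CredentialIssuerConfigStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: strategyErr.Error(),
LastUpdateTime: metav1.NewTime(clk.Now()),
}
// Assumes the generated clientset exposes ConfigV1alpha1().
cics := client.ConfigV1alpha1().CredentialIssuerConfigs(loc.Namespace)
existing, err := cics.Get(ctx, loc.Name, metav1.GetOptions{})
switch {
case k8serrors.IsNotFound(err):
_, createErr := cics.Create(ctx, &configv1alpha1.CredentialIssuerConfig{
ObjectMeta: metav1.ObjectMeta{Namespace: loc.Namespace, Name: loc.Name},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{strategy},
},
}, metav1.CreateOptions{})
if createErr != nil {
return fmt.Errorf("could not create or update credentialissuerconfig: create failed: %w", createErr)
}
case err != nil:
return fmt.Errorf("could not create or update credentialissuerconfig: get failed: %w", err)
default:
updated := existing.DeepCopy()
updated.Status.Strategies = []configv1alpha1.CredentialIssuerConfigStrategy{strategy}
if _, updateErr := cics.Update(ctx, updated, metav1.UpdateOptions{}); updateErr != nil {
return fmt.Errorf("could not create or update credentialissuerconfig: %w", updateErr)
}
}
return nil
}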

View File

@ -0,0 +1,551 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"context"
"errors"
"testing"
"time"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
kubernetesfake "k8s.io/client-go/kubernetes/fake"
coretesting "k8s.io/client-go/testing"
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
pinnipedfake "go.pinniped.dev/generated/1.19/client/clientset/versioned/fake"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/testutil"
)
func TestCreaterControllerFilter(t *testing.T) {
defineSharedKubecertagentFilterSpecs(
t,
"CreaterControllerFilter",
func(
agentPodConfig *AgentPodConfig,
credentialIssuerConfigLocationConfig *CredentialIssuerConfigLocationConfig,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
observableWithInformerOption *testutil.ObservableWithInformerOption,
) {
_ = NewCreaterController(
agentPodConfig,
credentialIssuerConfigLocationConfig,
nil, // clock, shouldn't matter
nil, // k8sClient, shouldn't matter
nil, // pinnipedAPIClient, shouldn't matter
kubeSystemPodInformer,
agentPodInformer,
observableWithInformerOption.WithInformer,
)
},
)
}
func TestCreaterControllerSync(t *testing.T) {
spec.Run(t, "CreaterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
const kubeSystemNamespace = "kube-system"
const agentPodNamespace = "agent-pod-namespace"
const credentialIssuerConfigNamespaceName = "cic-namespace-name"
const credentialIssuerConfigResourceName = "cic-resource-name"
var r *require.Assertions
var subject controllerlib.Controller
var kubeAPIClient *kubernetesfake.Clientset
var kubeSystemInformerClient *kubernetesfake.Clientset
var kubeSystemInformers kubeinformers.SharedInformerFactory
var agentInformerClient *kubernetesfake.Clientset
var agentInformers kubeinformers.SharedInformerFactory
var pinnipedAPIClient *pinnipedfake.Clientset
var timeoutContext context.Context
var timeoutContextCancel context.CancelFunc
var syncContext *controllerlib.Context
var controllerManagerPod, agentPod *corev1.Pod
var podsGVR schema.GroupVersionResource
var credentialIssuerConfigGVR schema.GroupVersionResource
var frozenNow time.Time
// Defer starting the informers until the last possible moment so that the
// nested Before's can keep adding things to the informer caches.
var startInformersAndController = func() {
// Set this at the last second to allow for injection of server override.
subject = NewCreaterController(
&AgentPodConfig{
Namespace: agentPodNamespace,
ContainerImage: "some-agent-image",
PodNamePrefix: "some-agent-name-",
},
&CredentialIssuerConfigLocationConfig{
Namespace: credentialIssuerConfigNamespaceName,
Name: credentialIssuerConfigResourceName,
},
clock.NewFakeClock(frozenNow),
kubeAPIClient,
pinnipedAPIClient,
kubeSystemInformers.Core().V1().Pods(),
agentInformers.Core().V1().Pods(),
controllerlib.WithInformer,
)
// Set this at the last second to support calling subject.Name().
syncContext = &controllerlib.Context{
Context: timeoutContext,
Name: subject.Name(),
Key: controllerlib.Key{
Namespace: kubeSystemNamespace,
Name: "should-not-matter",
},
}
// Must start informers before calling TestRunSynchronously()
kubeSystemInformers.Start(timeoutContext.Done())
agentInformers.Start(timeoutContext.Done())
controllerlib.TestRunSynchronously(t, subject)
}
it.Before(func() {
r = require.New(t)
kubeAPIClient = kubernetesfake.NewSimpleClientset()
kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
agentInformerClient = kubernetesfake.NewSimpleClientset()
agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
timeoutContext, timeoutContextCancel = context.WithTimeout(context.Background(), time.Second*3)
controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
kubeSystemNamespace, agentPodNamespace, "ignored for this test", "ignored for this test",
)
podsGVR = schema.GroupVersionResource{
Group: corev1.SchemeGroupVersion.Group,
Version: corev1.SchemeGroupVersion.Version,
Resource: "pods",
}
credentialIssuerConfigGVR = schema.GroupVersionResource{
Group: configv1alpha1.GroupName,
Version: configv1alpha1.SchemeGroupVersion.Version,
Resource: "credentialissuerconfigs",
}
frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
// Add a pod into the test that doesn't matter to make sure we don't accidentally trigger any
// logic on this thing.
ignorablePod := corev1.Pod{}
ignorablePod.Name = "some-ignorable-pod"
r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
// Add another valid agent pod to make sure our logic works for just the pod we care about.
otherAgentPod := agentPod.DeepCopy()
otherAgentPod.Name = "some-other-agent"
otherAgentPod.Annotations = map[string]string{
"kube-cert-agent.pinniped.dev/controller-manager-name": "some-other-controller-manager-name",
"kube-cert-agent.pinniped.dev/controller-manager-uid": "some-other-controller-manager-uid",
}
r.NoError(agentInformerClient.Tracker().Add(otherAgentPod))
r.NoError(kubeAPIClient.Tracker().Add(otherAgentPod))
})
it.After(func() {
timeoutContextCancel()
})
when("there is a controller manager pod", func() {
it.Before(func() {
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
when("there is a matching agent pod", func() {
it.Before(func() {
r.NoError(agentInformerClient.Tracker().Add(agentPod))
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
})
it("does nothing", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{},
kubeAPIClient.Actions(),
)
})
})
when("there is a non-matching agent pod", func() {
it.Before(func() {
nonMatchingAgentPod := agentPod.DeepCopy()
nonMatchingAgentPod.Name = "some-agent-name-85da432e"
nonMatchingAgentPod.Annotations[controllerManagerUIDAnnotationKey] = "some-non-matching-uid"
r.NoError(agentInformerClient.Tracker().Add(nonMatchingAgentPod))
r.NoError(kubeAPIClient.Tracker().Add(nonMatchingAgentPod))
})
it("creates a matching agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewCreateAction(
podsGVR,
agentPodNamespace,
agentPod,
),
},
kubeAPIClient.Actions(),
)
})
})
when("there is no matching agent pod", func() {
it("creates a matching agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewCreateAction(
podsGVR,
agentPodNamespace,
agentPod,
),
},
kubeAPIClient.Actions(),
)
})
when("creating the matching agent pod fails", func() {
it.Before(func() {
kubeAPIClient.PrependReactor(
"create",
"pods",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some create error")
},
)
})
when("there is already a CredentialIssuerConfig", func() {
var initialCredentialIssuerConfig *configv1alpha1.CredentialIssuerConfig
it.Before(func() {
initialCredentialIssuerConfig = &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{},
KubeConfigInfo: &configv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: "some-server",
CertificateAuthorityData: "some-ca-value",
},
},
}
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuerConfig))
})
it("updates the CredentialIssuerConfig status saying that controller manager pods couldn't be found", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
expectedCredentialIssuerConfig := initialCredentialIssuerConfig.DeepCopy()
expectedCredentialIssuerConfig.Status.Strategies = []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "cannot create agent pod: some create error",
LastUpdateTime: metav1.NewTime(frozenNow),
},
}
expectedGetAction := coretesting.NewGetAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
credentialIssuerConfigResourceName,
)
expectedUpdateAction := coretesting.NewUpdateAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
expectedCredentialIssuerConfig,
)
r.EqualError(err, "cannot create agent pod: some create error")
r.Equal(
[]coretesting.Action{
expectedGetAction,
expectedUpdateAction,
},
pinnipedAPIClient.Actions(),
)
})
when("the CredentialIssuerConfig operation fails", func() {
it.Before(func() {
pinnipedAPIClient.PrependReactor(
"update",
"credentialissuerconfigs",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some update error")
},
)
})
it("still returns the pod create error, since the controller will get rescheduled", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.EqualError(err, "cannot create agent pod: some create error")
})
})
})
when("there is not already a CredentialIssuerConfig", func() {
it("returns an error and updates the CredentialIssuerConfig status", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
expectedCredentialIssuerConfig := &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "cannot create agent pod: some create error",
LastUpdateTime: metav1.NewTime(frozenNow),
},
},
},
}
expectedGetAction := coretesting.NewGetAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
credentialIssuerConfigResourceName,
)
expectedCreateAction := coretesting.NewCreateAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
expectedCredentialIssuerConfig,
)
r.EqualError(err, "cannot create agent pod: some create error")
r.Equal(
[]coretesting.Action{
expectedGetAction,
expectedCreateAction,
},
pinnipedAPIClient.Actions(),
)
})
})
})
})
})
when("there is no controller manager pod", func() {
when("there is already a CredentialIssuerConfig", func() {
var initialCredentialIssuerConfig *configv1alpha1.CredentialIssuerConfig
it.Before(func() {
initialCredentialIssuerConfig = &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{},
KubeConfigInfo: &configv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: "some-server",
CertificateAuthorityData: "some-ca-value",
},
},
}
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuerConfig))
})
it("updates the CredentialIssuerConfig status saying that controller manager pods couldn't be found", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
expectedCredentialIssuerConfig := initialCredentialIssuerConfig.DeepCopy()
expectedCredentialIssuerConfig.Status.Strategies = []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "Controller manager pod(s) could not be found",
LastUpdateTime: metav1.NewTime(frozenNow),
},
}
expectedGetAction := coretesting.NewGetAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
credentialIssuerConfigResourceName,
)
expectedUpdateAction := coretesting.NewUpdateAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
expectedCredentialIssuerConfig,
)
r.Equal(
[]coretesting.Action{
expectedGetAction,
expectedUpdateAction,
},
pinnipedAPIClient.Actions(),
)
})
when("when updating the CredentialIssuerConfig fails", func() {
it.Before(func() {
pinnipedAPIClient.PrependReactor(
"update",
"credentialissuerconfigs",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some update error")
},
)
})
it("returns an error", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.EqualError(err, "could not create or update credentialissuerconfig: some update error")
})
})
when("when getting the CredentialIssuerConfig fails", func() {
it.Before(func() {
pinnipedAPIClient.PrependReactor(
"get",
"credentialissuerconfigs",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some get error")
},
)
})
it("returns an error", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.EqualError(err, "could not create or update credentialissuerconfig: get failed: some get error")
})
})
})
when("there is not already a CredentialIssuerConfig", func() {
it("creates the CredentialIssuerConfig status saying that controller manager pods couldn't be found", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
expectedCredentialIssuerConfig := &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "Controller manager pod(s) could not be found",
LastUpdateTime: metav1.NewTime(frozenNow),
},
},
},
}
expectedGetAction := coretesting.NewGetAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
credentialIssuerConfigResourceName,
)
expectedCreateAction := coretesting.NewCreateAction(
credentialIssuerConfigGVR,
credentialIssuerConfigNamespaceName,
expectedCredentialIssuerConfig,
)
r.NoError(err)
r.Equal(
[]coretesting.Action{
expectedGetAction,
expectedCreateAction,
},
pinnipedAPIClient.Actions(),
)
})
when("when creating the CredentialIssuerConfig fails", func() {
it.Before(func() {
pinnipedAPIClient.PrependReactor(
"create",
"credentialissuerconfigs",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some create error")
},
)
})
it("returns an error", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.EqualError(err, "could not create or update credentialissuerconfig: create failed: some create error")
})
})
when("when getting the CredentialIssuerConfig fails", func() {
it.Before(func() {
pinnipedAPIClient.PrependReactor(
"get",
"credentialissuerconfigs",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some get error")
},
)
})
it("returns an error", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.EqualError(err, "could not create or update credentialissuerconfig: get failed: some get error")
})
})
})
})
}, spec.Parallel(), spec.Report(report.Terminal{}))
}

View File

@ -0,0 +1,90 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
)
type deleterController struct {
agentPodConfig *AgentPodConfig
k8sClient kubernetes.Interface
kubeSystemPodInformer corev1informers.PodInformer
agentPodInformer corev1informers.PodInformer
}
// NewDeleterController returns a controller that deletes any kube-cert-agent pods that are out of
// sync with the known kube-controller-manager pods.
//
// This controller compares agent pods against the pod template derived from the provided agentPodConfig.
func NewDeleterController(
agentPodConfig *AgentPodConfig,
k8sClient kubernetes.Interface,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
return controllerlib.New(
controllerlib.Config{
Name: "kube-cert-agent-deleter-controller",
Syncer: &deleterController{
agentPodConfig: agentPodConfig,
k8sClient: k8sClient,
kubeSystemPodInformer: kubeSystemPodInformer,
agentPodInformer: agentPodInformer,
},
},
withInformer(
kubeSystemPodInformer,
pinnipedcontroller.SimpleFilter(isControllerManagerPod),
controllerlib.InformerOption{},
),
withInformer(
agentPodInformer,
pinnipedcontroller.SimpleFilter(isAgentPod),
controllerlib.InformerOption{},
),
)
}
// Sync implements controllerlib.Syncer.
func (c *deleterController) Sync(ctx controllerlib.Context) error {
agentSelector := labels.SelectorFromSet(c.agentPodConfig.Labels())
agentPods, err := c.agentPodInformer.
Lister().
Pods(c.agentPodConfig.Namespace).
List(agentSelector)
if err != nil {
return fmt.Errorf("informer cannot list agent pods: %w", err)
}
for _, agentPod := range agentPods {
controllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(agentPod, c.kubeSystemPodInformer)
if err != nil {
return err
}
if controllerManagerPod == nil ||
!isAgentPodUpToDate(agentPod, newAgentPod(controllerManagerPod, c.agentPodConfig.PodTemplate())) {
klog.InfoS("deleting agent pod", "pod", klog.KObj(agentPod))
err := c.k8sClient.
CoreV1().
Pods(agentPod.Namespace).
Delete(ctx.Context, agentPod.Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("cannot delete agent pod: %w", err)
}
}
}
return nil
}
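The deleter's staleness check lives in isAgentPodUpToDate, which is also not part of this diff. Judging by the mismatch cases exercised in the tests that follow (volumes, volume mounts, node selector, node name, tolerations, restart policy, automount service account token, and the container's name, image, and command), it compares the live agent pod against a freshly templated one. A rough, assumption-laden sketch of that kind of comparison:
// A sketch of the comparison isAgentPodUpToDate appears to perform, based only
// on the mismatch cases covered by the deleter tests below. The exact set of
// compared fields is an assumption for illustration.
package kubecertagentsketch
import (
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
)
func agentPodLooksUpToDate(actual, expected *corev1.Pod) bool {
if len(actual.Spec.Containers) != 1 || len(expected.Spec.Containers) != 1 {
return false
}
actualC, expectedC := actual.Spec.Containers[0], expected.Spec.Containers[0]
eq := apiequality.Semantic.DeepEqual
return eq(actualC.Name, expectedC.Name) &&
eq(actualC.Image, expectedC.Image) &&
eq(actualC.Command, expectedC.Command) &&
eq(actualC.VolumeMounts, expectedC.VolumeMounts) &&
eq(actual.Spec.Volumes, expected.Spec.Volumes) &&
eq(actual.Spec.RestartPolicy, expected.Spec.RestartPolicy) &&
eq(actual.Spec.NodeSelector, expected.Spec.NodeSelector) &&
eq(actual.Spec.NodeName, expected.Spec.NodeName) &&
eq(actual.Spec.Tolerations, expected.Spec.Tolerations) &&
eq(actual.Spec.AutomountServiceAccountToken, expected.Spec.AutomountServiceAccountToken)
}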

View File

@ -0,0 +1,510 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"context"
"testing"
"time"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
kubernetesfake "k8s.io/client-go/kubernetes/fake"
coretesting "k8s.io/client-go/testing"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/testutil"
)
func TestDeleterControllerFilter(t *testing.T) {
defineSharedKubecertagentFilterSpecs(
t,
"DeleterControllerFilter",
func(
agentPodConfig *AgentPodConfig,
_ *CredentialIssuerConfigLocationConfig,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
observableWithInformerOption *testutil.ObservableWithInformerOption,
) {
_ = NewDeleterController(
agentPodConfig,
nil, // k8sClient, shouldn't matter
kubeSystemPodInformer,
agentPodInformer,
observableWithInformerOption.WithInformer,
)
},
)
}
func TestDeleterControllerSync(t *testing.T) {
spec.Run(t, "DeleterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
const kubeSystemNamespace = "kube-system"
const agentPodNamespace = "agent-pod-namespace"
var r *require.Assertions
var subject controllerlib.Controller
var kubeAPIClient *kubernetesfake.Clientset
var kubeSystemInformerClient *kubernetesfake.Clientset
var kubeSystemInformers kubeinformers.SharedInformerFactory
var agentInformerClient *kubernetesfake.Clientset
var agentInformers kubeinformers.SharedInformerFactory
var timeoutContext context.Context
var timeoutContextCancel context.CancelFunc
var syncContext *controllerlib.Context
var controllerManagerPod, agentPod *corev1.Pod
var podsGVR schema.GroupVersionResource
// Defer starting the informers until the last possible moment so that the
// nested Before's can keep adding things to the informer caches.
var startInformersAndController = func() {
// Set this at the last second to allow for injection of server override.
subject = NewDeleterController(
&AgentPodConfig{
Namespace: agentPodNamespace,
ContainerImage: "some-agent-image",
PodNamePrefix: "some-agent-name-",
},
kubeAPIClient,
kubeSystemInformers.Core().V1().Pods(),
agentInformers.Core().V1().Pods(),
controllerlib.WithInformer,
)
// Set this at the last second to support calling subject.Name().
syncContext = &controllerlib.Context{
Context: timeoutContext,
Name: subject.Name(),
Key: controllerlib.Key{
Namespace: kubeSystemNamespace,
Name: "should-not-matter",
},
}
// Must start informers before calling TestRunSynchronously()
kubeSystemInformers.Start(timeoutContext.Done())
agentInformers.Start(timeoutContext.Done())
controllerlib.TestRunSynchronously(t, subject)
}
it.Before(func() {
r = require.New(t)
timeoutContext, timeoutContextCancel = context.WithTimeout(context.Background(), time.Second*3)
kubeAPIClient = kubernetesfake.NewSimpleClientset()
kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
agentInformerClient = kubernetesfake.NewSimpleClientset()
agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
kubeSystemNamespace, agentPodNamespace, "ignored for this test", "ignored for this test",
)
podsGVR = schema.GroupVersionResource{
Group: corev1.SchemeGroupVersion.Group,
Version: corev1.SchemeGroupVersion.Version,
Resource: "pods",
}
// Add a pod into the test that doesn't matter to make sure we don't accidentally
// trigger any logic on this thing.
ignorablePod := corev1.Pod{}
ignorablePod.Name = "some-ignorable-pod"
r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
r.NoError(agentInformerClient.Tracker().Add(&ignorablePod))
r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
})
it.After(func() {
timeoutContextCancel()
})
when("there is an agent pod", func() {
it.Before(func() {
r.NoError(agentInformerClient.Tracker().Add(agentPod))
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
})
when("there is a matching controller manager pod", func() {
it.Before(func() {
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("does nothing", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{},
kubeAPIClient.Actions(),
)
})
when("the agent pod is out of sync with the controller manager via volume mounts", func() {
it.Before(func() {
controllerManagerPod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
{
Name: "some-other-volume-mount",
},
}
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync with the controller manager via volumes", func() {
it.Before(func() {
controllerManagerPod.Spec.Volumes = []corev1.Volume{
{
Name: "some-other-volume",
},
}
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync with the controller manager via node selector", func() {
it.Before(func() {
controllerManagerPod.Spec.NodeSelector = map[string]string{
"some-other-node-selector-key": "some-other-node-selector-value",
}
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync with the controller manager via node name", func() {
it.Before(func() {
controllerManagerPod.Spec.NodeName = "some-other-node-name"
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync with the controller manager via tolerations", func() {
it.Before(func() {
controllerManagerPod.Spec.Tolerations = []corev1.Toleration{
{
Key: "some-other-toleration-key",
},
}
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync via restart policy", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.RestartPolicy = corev1.RestartPolicyAlways
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync via automount service account token", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.AutomountServiceAccountToken = boolPtr(true)
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync with the template via name", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.Containers[0].Name = "some-new-name"
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync with the template via image", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.Containers[0].Image = "new-image"
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("the agent pod is out of sync with the template via command", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.Containers[0].Command = []string{"some", "new", "command"}
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
})
when("there is a non-matching controller manager pod via uid", func() {
it.Before(func() {
controllerManagerPod.UID = "some-other-controller-manager-uid"
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("there is a non-matching controller manager pod via name", func() {
it.Before(func() {
controllerManagerPod.Name = "some-other-controller-manager-name"
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
when("there is no matching controller manager pod", func() {
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{
coretesting.NewDeleteAction(
podsGVR,
agentPodNamespace,
agentPod.Name,
),
},
kubeAPIClient.Actions(),
)
})
})
})
when("there is no agent pod", func() {
it("does nothing", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.NoError(err)
r.Equal(
[]coretesting.Action{},
kubeAPIClient.Actions(),
)
})
})
}, spec.Parallel(), spec.Report(report.Terminal{}))
}

View File

@ -0,0 +1,137 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"fmt"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/clock"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/klog/v2"
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
)
type execerController struct {
credentialIssuerConfigLocationConfig *CredentialIssuerConfigLocationConfig
dynamicCertProvider dynamiccert.Provider
podCommandExecutor PodCommandExecutor
clock clock.Clock
pinnipedAPIClient pinnipedclientset.Interface
agentPodInformer corev1informers.PodInformer
}
// NewExecerController returns a controllerlib.Controller that listens for agent pods with proper
// cert/key path annotations and execs into them to read the cert/key material, which it then
// sets on the provided dynamicCertProvider.
func NewExecerController(
credentialIssuerConfigLocationConfig *CredentialIssuerConfigLocationConfig,
dynamicCertProvider dynamiccert.Provider,
podCommandExecutor PodCommandExecutor,
pinnipedAPIClient pinnipedclientset.Interface,
clock clock.Clock,
agentPodInformer corev1informers.PodInformer,
withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
return controllerlib.New(
controllerlib.Config{
Name: "kube-cert-agent-execer-controller",
Syncer: &execerController{
credentialIssuerConfigLocationConfig: credentialIssuerConfigLocationConfig,
dynamicCertProvider: dynamicCertProvider,
podCommandExecutor: podCommandExecutor,
pinnipedAPIClient: pinnipedAPIClient,
clock: clock,
agentPodInformer: agentPodInformer,
},
},
withInformer(
agentPodInformer,
pinnipedcontroller.SimpleFilter(isAgentPod),
controllerlib.InformerOption{},
),
)
}
func (c *execerController) Sync(ctx controllerlib.Context) error {
maybeAgentPod, err := c.agentPodInformer.Lister().Pods(ctx.Key.Namespace).Get(ctx.Key.Name)
notFound := k8serrors.IsNotFound(err)
if err != nil && !notFound {
return fmt.Errorf("failed to get %s/%s pod: %w", ctx.Key.Namespace, ctx.Key.Name, err)
}
if notFound {
// The pod in question does not exist, so it was probably deleted
return nil
}
certPath, keyPath := c.getKeypairFilePaths(maybeAgentPod)
if certPath == "" || keyPath == "" {
// The annotator controller has not annotated this agent pod yet, or it is not an agent pod at all
return nil
}
agentPod := maybeAgentPod
if agentPod.Status.Phase != v1.PodRunning {
// Seems to be an agent pod, but it is not ready yet
return nil
}
certPEM, err := c.podCommandExecutor.Exec(agentPod.Namespace, agentPod.Name, "cat", certPath)
if err != nil {
strategyResultUpdateErr := createOrUpdateCredentialIssuerConfig(
ctx.Context,
*c.credentialIssuerConfigLocationConfig,
c.clock,
c.pinnipedAPIClient,
err,
)
klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuerConfig with strategy error")
return err
}
keyPEM, err := c.podCommandExecutor.Exec(agentPod.Namespace, agentPod.Name, "cat", keyPath)
if err != nil {
strategyResultUpdateErr := createOrUpdateCredentialIssuerConfig(
ctx.Context,
*c.credentialIssuerConfigLocationConfig,
c.clock,
c.pinnipedAPIClient,
err,
)
klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuerConfig with strategy error")
return err
}
c.dynamicCertProvider.Set([]byte(certPEM), []byte(keyPEM))
err = createOrUpdateCredentialIssuerConfig(
ctx.Context,
*c.credentialIssuerConfigLocationConfig,
c.clock,
c.pinnipedAPIClient,
nil, // nil error = success! yay!
)
if err != nil {
return err
}
return nil
}
func (c *execerController) getKeypairFilePaths(pod *v1.Pod) (string, string) {
annotations := pod.Annotations
if annotations == nil {
annotations = make(map[string]string)
}
certPath := annotations[agentPodCertPathAnnotationKey]
keyPath := annotations[agentPodKeyPathAnnotationKey]
return certPath, keyPath
}
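
Stripped of the CredentialIssuerConfig bookkeeping, the Sync method above boils down to two execs followed by a provider update. The sketch below is only illustrative: it uses local stand-in interfaces that mirror PodCommandExecutor and dynamiccert.Provider, the fake types in it are hypothetical, and the namespace, pod name, and file paths are the placeholder values used by the tests that follow.

package main

import "fmt"

// Local stand-ins that mirror the PodCommandExecutor and dynamiccert.Provider
// interfaces used by the execer controller.
type podCommandExecutor interface {
    Exec(podNamespace, podName string, commandAndArgs ...string) (string, error)
}

type certKeySetter interface {
    Set(certPEM, keyPEM []byte)
}

// fetchSigningKeypair mirrors the happy path of execerController.Sync: cat the
// annotated cert and key paths inside the agent pod, then hand the PEM pair to
// the dynamic provider.
func fetchSigningKeypair(exec podCommandExecutor, setter certKeySetter, ns, pod, certPath, keyPath string) error {
    certPEM, err := exec.Exec(ns, pod, "cat", certPath)
    if err != nil {
        return err
    }
    keyPEM, err := exec.Exec(ns, pod, "cat", keyPath)
    if err != nil {
        return err
    }
    setter.Set([]byte(certPEM), []byte(keyPEM))
    return nil
}

// echoExecutor and printSetter are hypothetical fakes so the sketch runs standalone.
type echoExecutor struct{}

func (echoExecutor) Exec(ns, pod string, cmd ...string) (string, error) {
    return fmt.Sprintf("<contents of %s in %s/%s>", cmd[len(cmd)-1], ns, pod), nil
}

type printSetter struct{}

func (printSetter) Set(certPEM, keyPEM []byte) {
    fmt.Println(string(certPEM), string(keyPEM))
}

func main() {
    _ = fetchSigningKeypair(echoExecutor{}, printSetter{}, "some-namespace", "some-agent-pod-name-123", "/some/cert/path", "/some/key/path")
}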

View File

@ -0,0 +1,506 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"context"
"errors"
"fmt"
"io/ioutil"
"testing"
"time"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
kubeinformers "k8s.io/client-go/informers"
kubernetesfake "k8s.io/client-go/kubernetes/fake"
coretesting "k8s.io/client-go/testing"
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
pinnipedfake "go.pinniped.dev/generated/1.19/client/clientset/versioned/fake"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/testutil"
)
func TestExecerControllerOptions(t *testing.T) {
spec.Run(t, "options", func(t *testing.T, when spec.G, it spec.S) {
var r *require.Assertions
var observableWithInformerOption *testutil.ObservableWithInformerOption
var agentPodInformerFilter controllerlib.Filter
whateverPod := &corev1.Pod{}
it.Before(func() {
r = require.New(t)
observableWithInformerOption = testutil.NewObservableWithInformerOption()
agentPodsInformer := kubeinformers.NewSharedInformerFactory(nil, 0).Core().V1().Pods()
_ = NewExecerController(
&CredentialIssuerConfigLocationConfig{
Namespace: "ignored by this test",
Name: "ignored by this test",
},
nil, // dynamicCertProvider, not needed for this test
nil, // podCommandExecutor, not needed for this test
nil, // pinnipedAPIClient, not needed for this test
nil, // clock, not needed for this test
agentPodsInformer,
observableWithInformerOption.WithInformer,
)
agentPodInformerFilter = observableWithInformerOption.GetFilterForInformer(agentPodsInformer)
})
when("the change is happening in the agent's namespace", func() {
when("a pod with all agent labels is added/updated/deleted", func() {
it("returns true", func() {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"kube-cert-agent.pinniped.dev": "true",
},
},
}
r.True(agentPodInformerFilter.Add(pod))
r.True(agentPodInformerFilter.Update(whateverPod, pod))
r.True(agentPodInformerFilter.Update(pod, whateverPod))
r.True(agentPodInformerFilter.Delete(pod))
})
})
when("a pod missing the agent label is added/updated/deleted", func() {
it("returns false", func() {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"some-other-label-key": "some-other-label-value",
},
},
}
r.False(agentPodInformerFilter.Add(pod))
r.False(agentPodInformerFilter.Update(whateverPod, pod))
r.False(agentPodInformerFilter.Update(pod, whateverPod))
r.False(agentPodInformerFilter.Delete(pod))
})
})
})
}, spec.Parallel(), spec.Report(report.Terminal{}))
}
type fakePodExecutor struct {
r *require.Assertions
resultsToReturn []string
errorsToReturn []error
calledWithPodName []string
calledWithPodNamespace []string
calledWithCommandAndArgs [][]string
callCount int
}
func (s *fakePodExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
s.calledWithPodNamespace = append(s.calledWithPodNamespace, podNamespace)
s.calledWithPodName = append(s.calledWithPodName, podName)
s.calledWithCommandAndArgs = append(s.calledWithCommandAndArgs, commandAndArgs)
s.r.Less(s.callCount, len(s.resultsToReturn), "unexpected extra invocation of fakePodExecutor")
result := s.resultsToReturn[s.callCount]
var err error = nil
if s.errorsToReturn != nil {
s.r.Less(s.callCount, len(s.errorsToReturn), "unexpected extra invocation of fakePodExecutor")
err = s.errorsToReturn[s.callCount]
}
s.callCount++
if err != nil {
return "", err
}
return result, nil
}
func TestManagerControllerSync(t *testing.T) {
spec.Run(t, "Sync", func(t *testing.T, when spec.G, it spec.S) {
const agentPodNamespace = "some-namespace"
const agentPodName = "some-agent-pod-name-123"
const certPathAnnotationName = "kube-cert-agent.pinniped.dev/cert-path"
const keyPathAnnotationName = "kube-cert-agent.pinniped.dev/key-path"
const fakeCertPath = "/some/cert/path"
const fakeKeyPath = "/some/key/path"
const defaultDynamicCertProviderCert = "initial-cert"
const defaultDynamicCertProviderKey = "initial-key"
const credentialIssuerConfigNamespaceName = "cic-namespace-name"
const credentialIssuerConfigResourceName = "cic-resource-name"
var r *require.Assertions
var subject controllerlib.Controller
var timeoutContext context.Context
var timeoutContextCancel context.CancelFunc
var syncContext *controllerlib.Context
var pinnipedAPIClient *pinnipedfake.Clientset
var agentPodInformer kubeinformers.SharedInformerFactory
var agentPodInformerClient *kubernetesfake.Clientset
var fakeExecutor *fakePodExecutor
var dynamicCertProvider dynamiccert.Provider
var fakeCertPEM, fakeKeyPEM string
var credentialIssuerConfigGVR schema.GroupVersionResource
var frozenNow time.Time
// Defer starting the informers until the last possible moment so that the
// nested Before's can keep adding things to the informer caches.
var startInformersAndController = func() {
// Set this at the last second to allow the nested Befores to finish configuring the fakes.
subject = NewExecerController(
&CredentialIssuerConfigLocationConfig{
Namespace: credentialIssuerConfigNamespaceName,
Name: credentialIssuerConfigResourceName,
},
dynamicCertProvider,
fakeExecutor,
pinnipedAPIClient,
clock.NewFakeClock(frozenNow),
agentPodInformer.Core().V1().Pods(),
controllerlib.WithInformer,
)
// Set this at the last second to support calling subject.Name().
syncContext = &controllerlib.Context{
Context: timeoutContext,
Name: subject.Name(),
Key: controllerlib.Key{
Namespace: agentPodNamespace,
Name: agentPodName,
},
}
// Must start informers before calling TestRunSynchronously()
agentPodInformer.Start(timeoutContext.Done())
controllerlib.TestRunSynchronously(t, subject)
}
var newAgentPod = func(agentPodName string, hasCertPathAnnotations bool) *corev1.Pod {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: agentPodName,
Namespace: agentPodNamespace,
Labels: map[string]string{
"some-label-key": "some-label-value",
},
},
}
if hasCertPathAnnotations {
pod.Annotations = map[string]string{
certPathAnnotationName: fakeCertPath,
keyPathAnnotationName: fakeKeyPath,
}
}
return pod
}
var requireDynamicCertProviderHasDefaultValues = func() {
actualCertPEM, actualKeyPEM := dynamicCertProvider.CurrentCertKeyContent()
r.Equal(defaultDynamicCertProviderCert, string(actualCertPEM))
r.Equal(defaultDynamicCertProviderKey, string(actualKeyPEM))
}
var requireNoExternalActionsTaken = func() {
r.Empty(pinnipedAPIClient.Actions())
r.Zero(fakeExecutor.callCount)
requireDynamicCertProviderHasDefaultValues()
}
it.Before(func() {
r = require.New(t)
timeoutContext, timeoutContextCancel = context.WithTimeout(context.Background(), time.Second*3)
pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
agentPodInformerClient = kubernetesfake.NewSimpleClientset()
agentPodInformer = kubeinformers.NewSharedInformerFactory(agentPodInformerClient, 0)
fakeExecutor = &fakePodExecutor{r: r}
frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
dynamicCertProvider = dynamiccert.New()
dynamicCertProvider.Set([]byte(defaultDynamicCertProviderCert), []byte(defaultDynamicCertProviderKey))
loadFile := func(filename string) string {
bytes, err := ioutil.ReadFile(filename)
r.NoError(err)
return string(bytes)
}
fakeCertPEM = loadFile("./testdata/test.crt")
fakeKeyPEM = loadFile("./testdata/test.key")
credentialIssuerConfigGVR = schema.GroupVersionResource{
Group: configv1alpha1.GroupName,
Version: configv1alpha1.SchemeGroupVersion.Version,
Resource: "credentialissuerconfigs",
}
})
it.After(func() {
timeoutContextCancel()
})
when("there is not yet any agent pods or they were deleted", func() {
it.Before(func() {
unrelatedPod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "some other pod",
Namespace: agentPodNamespace,
},
}
r.NoError(agentPodInformerClient.Tracker().Add(unrelatedPod))
startInformersAndController()
})
it("does nothing", func() {
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
requireNoExternalActionsTaken()
})
})
when("there is an agent pod, as determined by its labels matching the agent pod template labels, which is not yet annotated by the annotater controller", func() {
it.Before(func() {
agentPod := newAgentPod(agentPodName, false)
r.NoError(agentPodInformerClient.Tracker().Add(agentPod))
startInformersAndController()
})
it("does nothing", func() {
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
requireNoExternalActionsTaken()
})
})
when("there is an agent pod, as determined by its labels matching the agent pod template labels, and it was annotated by the annotater controller, but it is not Running", func() {
it.Before(func() {
agentPod := newAgentPod(agentPodName, true)
agentPod.Status.Phase = corev1.PodPending // not Running
r.NoError(agentPodInformerClient.Tracker().Add(agentPod))
startInformersAndController()
})
it("does nothing", func() {
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
requireNoExternalActionsTaken()
})
})
when("there is an agent pod, as determined by its labels matching the agent pod template labels, which is already annotated by the annotater controller, and it is Running", func() {
it.Before(func() {
targetAgentPod := newAgentPod(agentPodName, true)
targetAgentPod.Status.Phase = corev1.PodRunning
anotherAgentPod := newAgentPod("some-other-agent-pod-which-is-not-the-context-of-this-sync", true)
r.NoError(agentPodInformerClient.Tracker().Add(targetAgentPod))
r.NoError(agentPodInformerClient.Tracker().Add(anotherAgentPod))
})
when("the resulting pod execs will succeed", func() {
it.Before(func() {
fakeExecutor.resultsToReturn = []string{fakeCertPEM, fakeKeyPEM}
})
it("execs to the agent pod to get the keys and updates the dynamic certificates provider with the new certs", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
r.Equal(2, fakeExecutor.callCount)
r.Equal(agentPodNamespace, fakeExecutor.calledWithPodNamespace[0])
r.Equal(agentPodName, fakeExecutor.calledWithPodName[0])
r.Equal([]string{"cat", fakeCertPath}, fakeExecutor.calledWithCommandAndArgs[0])
r.Equal(agentPodNamespace, fakeExecutor.calledWithPodNamespace[1])
r.Equal(agentPodName, fakeExecutor.calledWithPodName[1])
r.Equal([]string{"cat", fakeKeyPath}, fakeExecutor.calledWithCommandAndArgs[1])
actualCertPEM, actualKeyPEM := dynamicCertProvider.CurrentCertKeyContent()
r.Equal(fakeCertPEM, string(actualCertPEM))
r.Equal(fakeKeyPEM, string(actualKeyPEM))
})
when("there is already a CredentialIssuerConfig", func() {
var initialCredentialIssuerConfig *configv1alpha1.CredentialIssuerConfig
it.Before(func() {
initialCredentialIssuerConfig = &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{},
KubeConfigInfo: &configv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: "some-server",
CertificateAuthorityData: "some-ca-value",
},
},
}
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuerConfig))
})
it("also updates the the existing CredentialIssuerConfig status field", func() {
startInformersAndController()
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
expectedCredentialIssuerConfig := initialCredentialIssuerConfig.DeepCopy()
expectedCredentialIssuerConfig.Status.Strategies = []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
Reason: configv1alpha1.FetchedKeyStrategyReason,
Message: "Key was fetched successfully",
LastUpdateTime: metav1.NewTime(frozenNow),
},
}
expectedGetAction := coretesting.NewGetAction(credentialIssuerConfigGVR, credentialIssuerConfigNamespaceName, credentialIssuerConfigResourceName)
expectedCreateAction := coretesting.NewUpdateAction(credentialIssuerConfigGVR, credentialIssuerConfigNamespaceName, expectedCredentialIssuerConfig)
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction}, pinnipedAPIClient.Actions())
})
when("updating the CredentialIssuerConfig fails", func() {
it.Before(func() {
pinnipedAPIClient.PrependReactor(
"update",
"credentialissuerconfigs",
func(_ coretesting.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("some update error")
},
)
})
it("returns an error", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)
r.EqualError(err, "could not create or update credentialissuerconfig: some update error")
})
})
})
when("there is not already a CredentialIssuerConfig", func() {
it.Before(func() {
startInformersAndController()
})
it("also creates the the CredentialIssuerConfig with the appropriate status field", func() {
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
expectedCredentialIssuerConfig := &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
Reason: configv1alpha1.FetchedKeyStrategyReason,
Message: "Key was fetched successfully",
LastUpdateTime: metav1.NewTime(frozenNow),
},
},
},
}
expectedGetAction := coretesting.NewGetAction(credentialIssuerConfigGVR, credentialIssuerConfigNamespaceName, credentialIssuerConfigResourceName)
expectedCreateAction := coretesting.NewCreateAction(credentialIssuerConfigGVR, credentialIssuerConfigNamespaceName, expectedCredentialIssuerConfig)
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction}, pinnipedAPIClient.Actions())
})
})
})
when("the first resulting pod exec will fail", func() {
var podExecErrorMessage string
it.Before(func() {
podExecErrorMessage = "some pod exec error message"
fakeExecutor.errorsToReturn = []error{fmt.Errorf(podExecErrorMessage), nil}
fakeExecutor.resultsToReturn = []string{"", fakeKeyPEM}
startInformersAndController()
})
it("does not update the dynamic certificates provider", func() {
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
requireDynamicCertProviderHasDefaultValues()
})
it("creates or updates the the CredentialIssuerConfig status field with an error", func() {
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
expectedCredentialIssuerConfig := &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: podExecErrorMessage,
LastUpdateTime: metav1.NewTime(frozenNow),
},
},
},
}
expectedGetAction := coretesting.NewGetAction(credentialIssuerConfigGVR, credentialIssuerConfigNamespaceName, credentialIssuerConfigResourceName)
expectedCreateAction := coretesting.NewCreateAction(credentialIssuerConfigGVR, credentialIssuerConfigNamespaceName, expectedCredentialIssuerConfig)
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction}, pinnipedAPIClient.Actions())
})
})
when("the second resulting pod exec will fail", func() {
var podExecErrorMessage string
it.Before(func() {
podExecErrorMessage = "some pod exec error message"
fakeExecutor.errorsToReturn = []error{nil, fmt.Errorf(podExecErrorMessage)}
fakeExecutor.resultsToReturn = []string{fakeCertPEM, ""}
startInformersAndController()
})
it("does not update the dynamic certificates provider", func() {
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
requireDynamicCertProviderHasDefaultValues()
})
it("creates or updates the the CredentialIssuerConfig status field with an error", func() {
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
expectedCredentialIssuerConfig := &configv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: credentialIssuerConfigResourceName,
Namespace: credentialIssuerConfigNamespaceName,
},
Status: configv1alpha1.CredentialIssuerConfigStatus{
Strategies: []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: podExecErrorMessage,
LastUpdateTime: metav1.NewTime(frozenNow),
},
},
},
}
expectedGetAction := coretesting.NewGetAction(credentialIssuerConfigGVR, credentialIssuerConfigNamespaceName, credentialIssuerConfigResourceName)
expectedCreateAction := coretesting.NewCreateAction(credentialIssuerConfigGVR, credentialIssuerConfigNamespaceName, expectedCredentialIssuerConfig)
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction}, pinnipedAPIClient.Actions())
})
})
})
}, spec.Parallel(), spec.Report(report.Terminal{}))
}
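
The it.Before above reads ./testdata/test.crt and ./testdata/test.key; the fake executor simply returns their contents and the assertions compare them as opaque strings, so any valid PEM pair would do. The fixtures presumably ship with the repository, but a throwaway generator along these lines (hypothetical, not part of the change) shows the expected shape:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/pem"
    "io/ioutil"
    "math/big"
    "os"
    "time"
)

func main() {
    // Generate a throwaway key and a self-signed certificate.
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    must(err)
    template := x509.Certificate{
        SerialNumber: big.NewInt(1),
        Subject:      pkix.Name{CommonName: "test"},
        NotBefore:    time.Now(),
        NotAfter:     time.Now().Add(24 * time.Hour),
    }
    certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
    must(err)
    keyDER, err := x509.MarshalECPrivateKey(key)
    must(err)

    // Write the PEM pair where the test's loadFile helper expects to find it.
    must(os.MkdirAll("testdata", 0755))
    must(ioutil.WriteFile("testdata/test.crt", pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}), 0600))
    must(ioutil.WriteFile("testdata/test.key", pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER}), 0600))
}

func must(err error) {
    if err != nil {
        panic(err)
    }
}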

View File

@ -0,0 +1,295 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package kubecertagent provides controllers that ensure a set of pods (the kube-cert-agent) is
// colocated with the Kubernetes controller manager so that Pinniped can access its signing keys.
//
// Note: the controllers use a filter that accepts all pods that look like the controller manager or
// an agent pod, across any add/update/delete event. Each of the controllers only cares about a
// subset of these events in reality, but the liberal filter implementation serves as an MVP.
package kubecertagent
import (
"context"
"encoding/hex"
"fmt"
"hash/fnv"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/klog/v2"
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
"go.pinniped.dev/internal/controller/issuerconfig"
)
const (
// ControllerManagerNamespace is the assumed namespace of the kube-controller-manager pod(s).
ControllerManagerNamespace = "kube-system"
controllerManagerNameAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-name"
controllerManagerUIDAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-uid"
// agentPodLabelKey is used to identify which pods are created by the kube-cert-agent
// controllers.
agentPodLabelKey = "kube-cert-agent.pinniped.dev"
agentPodLabelValue = "true"
// agentPodCertPathAnnotationKey is the annotation that the kube-cert-agent pod will use
// to communicate the in-pod path to the kube API's certificate.
agentPodCertPathAnnotationKey = "kube-cert-agent.pinniped.dev/cert-path"
// agentPodKeyPathAnnotationKey is the annotation that the kube-cert-agent pod will use
// to communicate the in-pod path to the kube API's key.
agentPodKeyPathAnnotationKey = "kube-cert-agent.pinniped.dev/key-path"
)
type AgentPodConfig struct {
// The namespace in which agent pods will be created.
Namespace string
// The container image used for the agent pods.
ContainerImage string
// The name prefix for each of the agent pods.
PodNamePrefix string
}
type CredentialIssuerConfigLocationConfig struct {
// The namespace in which the CredentialIssuerConfig should be created/updated.
Namespace string
// The resource name for the CredentialIssuerConfig to be created/updated.
Name string
}
func (c *AgentPodConfig) Labels() map[string]string {
return map[string]string{
agentPodLabelKey: agentPodLabelValue,
}
}
func (c *AgentPodConfig) PodTemplate() *corev1.Pod {
terminateImmediately := int64(0)
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: c.PodNamePrefix,
Namespace: c.Namespace,
Labels: c.Labels(),
},
Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: &terminateImmediately,
Containers: []corev1.Container{
{
Name: "sleeper",
Image: c.ContainerImage,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"/bin/sleep", "infinity"},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("16Mi"),
corev1.ResourceCPU: resource.MustParse("10m"),
},
Requests: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("16Mi"),
corev1.ResourceCPU: resource.MustParse("10m"),
},
},
},
},
},
}
return pod
}
func newAgentPod(
controllerManagerPod *corev1.Pod,
template *corev1.Pod,
) *corev1.Pod {
agentPod := template.DeepCopy()
agentPod.Name = fmt.Sprintf("%s%s", agentPod.Name, hash(controllerManagerPod))
// It would be nice to use the OwnerReferences field here, but the agent pod is most likely in a
// different namespace than the kube-controller-manager pod, and therefore that breaks the
// OwnerReferences contract (see metav1.OwnerReference doc).
if agentPod.Annotations == nil {
agentPod.Annotations = make(map[string]string)
}
agentPod.Annotations[controllerManagerNameAnnotationKey] = controllerManagerPod.Name
agentPod.Annotations[controllerManagerUIDAnnotationKey] = string(controllerManagerPod.UID)
agentPod.Spec.Containers[0].VolumeMounts = controllerManagerPod.Spec.Containers[0].VolumeMounts
agentPod.Spec.Volumes = controllerManagerPod.Spec.Volumes
agentPod.Spec.RestartPolicy = corev1.RestartPolicyNever
agentPod.Spec.NodeSelector = controllerManagerPod.Spec.NodeSelector
agentPod.Spec.AutomountServiceAccountToken = boolPtr(false)
agentPod.Spec.NodeName = controllerManagerPod.Spec.NodeName
agentPod.Spec.Tolerations = controllerManagerPod.Spec.Tolerations
return agentPod
}
func isAgentPodUpToDate(actualAgentPod, expectedAgentPod *corev1.Pod) bool {
return equality.Semantic.DeepEqual(
actualAgentPod.Spec.Containers[0].VolumeMounts,
expectedAgentPod.Spec.Containers[0].VolumeMounts,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.Containers[0].Name,
expectedAgentPod.Spec.Containers[0].Name,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.Containers[0].Image,
expectedAgentPod.Spec.Containers[0].Image,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.Containers[0].Command,
expectedAgentPod.Spec.Containers[0].Command,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.Volumes,
expectedAgentPod.Spec.Volumes,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.RestartPolicy,
expectedAgentPod.Spec.RestartPolicy,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.NodeSelector,
expectedAgentPod.Spec.NodeSelector,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.AutomountServiceAccountToken,
expectedAgentPod.Spec.AutomountServiceAccountToken,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.NodeName,
expectedAgentPod.Spec.NodeName,
) &&
equality.Semantic.DeepEqual(
actualAgentPod.Spec.Tolerations,
expectedAgentPod.Spec.Tolerations,
)
}
func isControllerManagerPod(obj metav1.Object) bool {
pod, ok := obj.(*corev1.Pod)
if !ok {
return false
}
if pod.Labels == nil {
return false
}
component, ok := pod.Labels["component"]
if !ok || component != "kube-controller-manager" {
return false
}
if pod.Status.Phase != corev1.PodRunning {
return false
}
return true
}
func isAgentPod(obj metav1.Object) bool {
value, foundLabel := obj.GetLabels()[agentPodLabelKey]
return foundLabel && value == agentPodLabelValue
}
func findControllerManagerPodForSpecificAgentPod(
agentPod *corev1.Pod,
kubeSystemPodInformer corev1informers.PodInformer,
) (*corev1.Pod, error) {
name, ok := agentPod.Annotations[controllerManagerNameAnnotationKey]
if !ok {
klog.InfoS("agent pod missing parent name annotation", "pod", agentPod.Name)
return nil, nil
}
uid, ok := agentPod.Annotations[controllerManagerUIDAnnotationKey]
if !ok {
klog.InfoS("agent pod missing parent uid annotation", "pod", agentPod.Name)
return nil, nil
}
maybeControllerManagerPod, err := kubeSystemPodInformer.
Lister().
Pods(ControllerManagerNamespace).
Get(name)
notFound := k8serrors.IsNotFound(err)
if err != nil && !notFound {
return nil, fmt.Errorf("cannot get controller pod: %w", err)
} else if notFound ||
maybeControllerManagerPod == nil ||
string(maybeControllerManagerPod.UID) != uid {
return nil, nil
}
return maybeControllerManagerPod, nil
}
func createOrUpdateCredentialIssuerConfig(
ctx context.Context,
cicConfig CredentialIssuerConfigLocationConfig,
clock clock.Clock,
pinnipedAPIClient pinnipedclientset.Interface,
err error,
) error {
return issuerconfig.CreateOrUpdateCredentialIssuerConfig(
ctx,
cicConfig.Namespace,
cicConfig.Name,
pinnipedAPIClient,
func(configToUpdate *configv1alpha1.CredentialIssuerConfig) {
var strategyResult configv1alpha1.CredentialIssuerConfigStrategy
if err == nil {
strategyResult = strategySuccess(clock)
} else {
strategyResult = strategyError(clock, err)
}
configToUpdate.Status.Strategies = []configv1alpha1.CredentialIssuerConfigStrategy{
strategyResult,
}
},
)
}
func strategySuccess(clock clock.Clock) configv1alpha1.CredentialIssuerConfigStrategy {
return configv1alpha1.CredentialIssuerConfigStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
Reason: configv1alpha1.FetchedKeyStrategyReason,
Message: "Key was fetched successfully",
LastUpdateTime: metav1.NewTime(clock.Now()),
}
}
func strategyError(clock clock.Clock, err error) configv1alpha1.CredentialIssuerConfigStrategy {
return configv1alpha1.CredentialIssuerConfigStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: err.Error(),
LastUpdateTime: metav1.NewTime(clock.Now()),
}
}
func boolPtr(b bool) *bool { return &b }
func hash(controllerManagerPod *corev1.Pod) string {
// FNV should be faster than SHA, we don't care about hash-reversibility here, and Kubernetes
// uses FNV for its pod templates, so it should be good enough for us.
h := fnv.New32a()
_, _ = h.Write([]byte(controllerManagerPod.UID)) // Never returns an error, per godoc.
return hex.EncodeToString(h.Sum([]byte{}))
}
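
The agent pod name is therefore deterministic for a given controller manager pod: the configured name prefix plus the FNV-32a hash of the parent pod's UID. The standalone snippet below reproduces that suffix for the UID used by the shared test fixtures in the next file, which expect it to be "fbb0addd".

package main

import (
    "encoding/hex"
    "fmt"
    "hash/fnv"
)

func main() {
    // Same computation as hash() above: FNV-32a over the controller manager pod's UID.
    h := fnv.New32a()
    _, _ = h.Write([]byte("some-controller-manager-uid"))
    suffix := hex.EncodeToString(h.Sum(nil))

    // The test fixtures build the agent pod name as prefix + suffix, e.g. "some-agent-name-fbb0addd".
    fmt.Println("some-agent-name-" + suffix)
}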

View File

@ -0,0 +1,240 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"testing"
"github.com/sclevine/spec"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/testutil"
)
func exampleControllerManagerAndAgentPods(
kubeSystemNamespace,
agentPodNamespace,
certPath,
keyPath string,
) (*corev1.Pod, *corev1.Pod) {
controllerManagerPod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: kubeSystemNamespace,
Name: "some-controller-manager-name",
Labels: map[string]string{
"component": "kube-controller-manager",
},
UID: types.UID("some-controller-manager-uid"),
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: "some-controller-manager-image",
Command: []string{
"kube-controller-manager",
"--cluster-signing-cert-file=" + certPath,
"--cluster-signing-key-file=" + keyPath,
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "some-volume-mount-name",
},
},
},
},
NodeName: "some-node-name",
NodeSelector: map[string]string{
"some-node-selector-key": "some-node-selector-value",
},
Tolerations: []corev1.Toleration{
{
Key: "some-toleration",
},
},
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
},
}
zero := int64(0)
// fnv 32a hash of controller-manager uid
controllerManagerPodHash := "fbb0addd"
agentPod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "some-agent-name-" + controllerManagerPodHash,
Namespace: agentPodNamespace,
Labels: map[string]string{
"kube-cert-agent.pinniped.dev": "true",
},
Annotations: map[string]string{
"kube-cert-agent.pinniped.dev/controller-manager-name": controllerManagerPod.Name,
"kube-cert-agent.pinniped.dev/controller-manager-uid": string(controllerManagerPod.UID),
},
},
Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []corev1.Container{
{
Name: "sleeper",
Image: "some-agent-image",
ImagePullPolicy: corev1.PullIfNotPresent,
VolumeMounts: controllerManagerPod.Spec.Containers[0].VolumeMounts,
Command: []string{"/bin/sleep", "infinity"},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("16Mi"),
corev1.ResourceCPU: resource.MustParse("10m"),
},
Requests: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("16Mi"),
corev1.ResourceCPU: resource.MustParse("10m"),
},
},
},
},
RestartPolicy: corev1.RestartPolicyNever,
AutomountServiceAccountToken: boolPtr(false),
NodeName: controllerManagerPod.Spec.NodeName,
NodeSelector: controllerManagerPod.Spec.NodeSelector,
Tolerations: controllerManagerPod.Spec.Tolerations,
},
}
return controllerManagerPod, agentPod
}
func defineSharedKubecertagentFilterSpecs(
t *testing.T,
name string,
newFunc func(
agentPodConfig *AgentPodConfig,
credentialIssuerConfigLocationConfig *CredentialIssuerConfigLocationConfig,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
observableWithInformerOption *testutil.ObservableWithInformerOption,
),
) {
spec.Run(t, name, func(t *testing.T, when spec.G, it spec.S) {
var r *require.Assertions
var kubeSystemPodInformerFilter, agentPodInformerFilter controllerlib.Filter
whateverPod := &corev1.Pod{}
it.Before(func() {
r = require.New(t)
kubeSystemPodInformer := kubeinformers.NewSharedInformerFactory(nil, 0).Core().V1().Pods()
agentPodInformer := kubeinformers.NewSharedInformerFactory(nil, 0).Core().V1().Pods()
observableWithInformerOption := testutil.NewObservableWithInformerOption()
newFunc(&AgentPodConfig{}, &CredentialIssuerConfigLocationConfig{}, kubeSystemPodInformer, agentPodInformer, observableWithInformerOption)
kubeSystemPodInformerFilter = observableWithInformerOption.GetFilterForInformer(kubeSystemPodInformer)
agentPodInformerFilter = observableWithInformerOption.GetFilterForInformer(agentPodInformer)
})
when("the event is happening in the kube system namespace", func() {
when("a pod with the proper controller manager labels and phase is added/updated/deleted", func() {
it("returns true", func() {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"component": "kube-controller-manager",
},
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
},
}
r.True(kubeSystemPodInformerFilter.Add(pod))
r.True(kubeSystemPodInformerFilter.Update(whateverPod, pod))
r.True(kubeSystemPodInformerFilter.Update(pod, whateverPod))
r.True(kubeSystemPodInformerFilter.Delete(pod))
})
})
when("a pod without the proper controller manager label is added/updated/deleted", func() {
it("returns false", func() {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
},
}
r.False(kubeSystemPodInformerFilter.Add(pod))
r.False(kubeSystemPodInformerFilter.Update(whateverPod, pod))
r.False(kubeSystemPodInformerFilter.Update(pod, whateverPod))
r.False(kubeSystemPodInformerFilter.Delete(pod))
})
})
when("a pod without the proper controller manager phase is added/updated/deleted", func() {
it("returns false", func() {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"component": "kube-controller-manager",
},
},
}
r.False(kubeSystemPodInformerFilter.Add(pod))
r.False(kubeSystemPodInformerFilter.Update(whateverPod, pod))
r.False(kubeSystemPodInformerFilter.Update(pod, whateverPod))
r.False(kubeSystemPodInformerFilter.Delete(pod))
})
})
})
when("the change is happening in the agent's informer", func() {
when("a pod with the agent label is added/updated/deleted", func() {
it("returns true", func() {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"kube-cert-agent.pinniped.dev": "true",
},
},
}
r.True(agentPodInformerFilter.Add(pod))
r.True(agentPodInformerFilter.Update(whateverPod, pod))
r.True(agentPodInformerFilter.Update(pod, whateverPod))
r.True(agentPodInformerFilter.Delete(pod))
})
})
when("a pod missing the agent label is added/updated/deleted", func() {
it("returns false", func() {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"some-other-label-key": "some-other-label-value",
},
},
}
r.False(agentPodInformerFilter.Add(pod))
r.False(agentPodInformerFilter.Update(whateverPod, pod))
r.False(agentPodInformerFilter.Update(pod, whateverPod))
r.False(agentPodInformerFilter.Delete(pod))
})
})
})
})
}

View File

@ -0,0 +1,59 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package kubecertagent
import (
"bytes"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/deprecated/scheme"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
// PodCommandExecutor can exec a command in a pod located via namespace and name.
type PodCommandExecutor interface {
Exec(podNamespace string, podName string, commandAndArgs ...string) (stdoutResult string, err error)
}
type kubeClientPodCommandExecutor struct {
kubeConfig *restclient.Config
kubeClient kubernetes.Interface
}
// NewPodCommandExecutor returns a PodCommandExecutor that will interact with a pod via the provided
// kubeConfig and corresponding kubeClient.
func NewPodCommandExecutor(kubeConfig *restclient.Config, kubeClient kubernetes.Interface) PodCommandExecutor {
return &kubeClientPodCommandExecutor{kubeConfig: kubeConfig, kubeClient: kubeClient}
}
func (s *kubeClientPodCommandExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
request := s.kubeClient.
CoreV1().
RESTClient().
Post().
Namespace(podNamespace).
Resource("pods").
Name(podName).
SubResource("exec").
VersionedParams(&v1.PodExecOptions{
Stdin: false,
Stdout: true,
Stderr: false,
TTY: false,
Command: commandAndArgs,
}, scheme.ParameterCodec)
executor, err := remotecommand.NewSPDYExecutor(s.kubeConfig, "POST", request.URL())
if err != nil {
return "", err
}
var stdoutBuf bytes.Buffer
if err := executor.Stream(remotecommand.StreamOptions{Stdout: &stdoutBuf}); err != nil {
return "", err
}
return stdoutBuf.String(), nil
}
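
For context, a minimal sketch of how this executor gets used: build an in-cluster rest config and clientset (as createConfig/createClients do in the controllermanager package below) and hand both to NewPodCommandExecutor. The namespace, pod name, and file path here are placeholders, and since the package is internal this can only be compiled from within the Pinniped module.

package main

import (
    "fmt"

    "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"

    "go.pinniped.dev/internal/controller/kubecertagent"
)

func main() {
    // Load the in-cluster kubeconfig and build a clientset.
    kubeConfig, err := restclient.InClusterConfig()
    if err != nil {
        panic(err)
    }
    kubeClient, err := kubernetes.NewForConfig(kubeConfig)
    if err != nil {
        panic(err)
    }

    executor := kubecertagent.NewPodCommandExecutor(kubeConfig, kubeClient)

    // Hypothetical pod coordinates; the controllers discover the real ones via annotations.
    stdout, err := executor.Exec("pinniped", "pinniped-kube-cert-agent-abc123", "cat", "/some/cert/path")
    if err != nil {
        panic(err)
    }
    fmt.Print(stdout)
}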

View File

@ -1,6 +1,8 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package controllermanager provides an entrypoint into running all of the controllers that run as
// a part of Pinniped.
package controllermanager
import (
@ -9,8 +11,10 @@ import (
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
k8sinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/klog/v2/klogr"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
@ -23,8 +27,9 @@ import (
"go.pinniped.dev/internal/controller/identityprovider/webhookcachecleaner"
"go.pinniped.dev/internal/controller/identityprovider/webhookcachefiller"
"go.pinniped.dev/internal/controller/issuerconfig"
"go.pinniped.dev/internal/controller/kubecertagent"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/provider"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/pkg/config/api"
)
@ -33,98 +38,200 @@ const (
defaultResyncInterval = 3 * time.Minute
)
// Config holds all the input parameters to the set of controllers run as a part of Pinniped.
//
// It is used to inject parameters into PrepareControllers.
type Config struct {
// ServerInstallationNamespace provides the namespace in which Pinniped is deployed.
ServerInstallationNamespace string
// NamesConfig comes from the Pinniped config API (see api.Config). It specifies how Kubernetes
// objects should be named.
NamesConfig *api.NamesConfigSpec
// KubeCertAgentConfig comes from the Pinniped config API (see api.Config). It configures how
// the kubecertagent package's controllers should manage the agent pods.
KubeCertAgentConfig *api.KubeCertAgentSpec
// DiscoveryURLOverride allows a caller to inject a hardcoded discovery URL into the Pinniped
// discovery document.
DiscoveryURLOverride *string
// DynamicServingCertProvider provides a setter and a getter to the Pinniped API's serving cert.
DynamicServingCertProvider dynamiccert.Provider
// DynamicSigningCertProvider provides a setter and a getter to the Pinniped API's
// signing cert, i.e., the cert that it uses to sign certs for Pinniped clients wishing to log in.
DynamicSigningCertProvider dynamiccert.Provider
// ServingCertDuration is the validity period of the API serving certificate.
ServingCertDuration time.Duration
// ServingCertRenewBefore is the period of time that Pinniped will wait, starting from issuance
// of the serving certificate, before rotating it.
ServingCertRenewBefore time.Duration
// IDPCache is a cache of authenticators shared amongst various IDP-related controllers.
IDPCache *idpcache.Cache
}
// Prepare the controllers and their informers and return a function that will start them when called.
func PrepareControllers(
serverInstallationNamespace string,
namesConfig api.NamesConfigSpec,
discoveryURLOverride *string,
dynamicCertProvider provider.DynamicTLSServingCertProvider,
servingCertDuration time.Duration,
servingCertRenewBefore time.Duration,
idpCache *idpcache.Cache,
) (func(ctx context.Context), error) {
//nolint:funlen // Eh, fair, it is a really long function...but it is wiring the world...so...
func PrepareControllers(c *Config) (func(ctx context.Context), error) {
// Create k8s clients.
k8sClient, aggregatorClient, pinnipedClient, err := createClients()
kubeConfig, err := createConfig()
if err != nil {
return nil, fmt.Errorf("could not create config for the controllers: %w", err)
}
k8sClient, aggregatorClient, pinnipedClient, err := createClients(kubeConfig)
if err != nil {
return nil, fmt.Errorf("could not create clients for the controllers: %w", err)
}
// Create informers. Don't forget to make sure they get started in the function returned below.
kubePublicNamespaceK8sInformers, installationNamespaceK8sInformers, installationNamespacePinnipedInformers :=
createInformers(serverInstallationNamespace, k8sClient, pinnipedClient)
informers := createInformers(c.ServerInstallationNamespace, k8sClient, pinnipedClient)
// Configuration for the kubecertagent controllers created below.
agentPodConfig := &kubecertagent.AgentPodConfig{
Namespace: c.ServerInstallationNamespace,
ContainerImage: *c.KubeCertAgentConfig.Image,
PodNamePrefix: *c.KubeCertAgentConfig.NamePrefix,
}
credentialIssuerConfigLocationConfig := &kubecertagent.CredentialIssuerConfigLocationConfig{
Namespace: c.ServerInstallationNamespace,
Name: c.NamesConfig.CredentialIssuerConfig,
}
// Create controller manager.
controllerManager := controllerlib.
NewManager().
// KubeConfig info publishing controller is responsible for writing the KubeConfig information to the
// CredentialIssuerConfig resource and keeping that information up to date.
WithController(
issuerconfig.NewPublisherController(serverInstallationNamespace,
namesConfig.CredentialIssuerConfig,
discoveryURLOverride,
issuerconfig.NewKubeConfigInfoPublisherController(
c.ServerInstallationNamespace,
c.NamesConfig.CredentialIssuerConfig,
c.DiscoveryURLOverride,
pinnipedClient,
kubePublicNamespaceK8sInformers.Core().V1().ConfigMaps(),
installationNamespacePinnipedInformers.Config().V1alpha1().CredentialIssuerConfigs(),
informers.kubePublicNamespaceK8s.Core().V1().ConfigMaps(),
controllerlib.WithInformer,
),
singletonWorker,
).
// API certs controllers are responsible for managing the TLS certificates used to serve Pinniped's API.
WithController(
apicerts.NewCertsManagerController(
serverInstallationNamespace,
namesConfig.ServingCertificateSecret,
c.ServerInstallationNamespace,
c.NamesConfig.ServingCertificateSecret,
k8sClient,
installationNamespaceK8sInformers.Core().V1().Secrets(),
informers.installationNamespaceK8s.Core().V1().Secrets(),
controllerlib.WithInformer,
controllerlib.WithInitialEvent,
servingCertDuration,
c.ServingCertDuration,
"Pinniped CA",
namesConfig.APIService,
c.NamesConfig.APIService,
),
singletonWorker,
).
WithController(
apicerts.NewAPIServiceUpdaterController(
serverInstallationNamespace,
namesConfig.ServingCertificateSecret,
c.ServerInstallationNamespace,
c.NamesConfig.ServingCertificateSecret,
loginv1alpha1.SchemeGroupVersion.Version+"."+loginv1alpha1.GroupName,
aggregatorClient,
installationNamespaceK8sInformers.Core().V1().Secrets(),
informers.installationNamespaceK8s.Core().V1().Secrets(),
controllerlib.WithInformer,
),
singletonWorker,
).
WithController(
apicerts.NewCertsObserverController(
serverInstallationNamespace,
namesConfig.ServingCertificateSecret,
dynamicCertProvider,
installationNamespaceK8sInformers.Core().V1().Secrets(),
c.ServerInstallationNamespace,
c.NamesConfig.ServingCertificateSecret,
c.DynamicServingCertProvider,
informers.installationNamespaceK8s.Core().V1().Secrets(),
controllerlib.WithInformer,
),
singletonWorker,
).
WithController(
apicerts.NewCertsExpirerController(
serverInstallationNamespace,
namesConfig.ServingCertificateSecret,
c.ServerInstallationNamespace,
c.NamesConfig.ServingCertificateSecret,
k8sClient,
installationNamespaceK8sInformers.Core().V1().Secrets(),
informers.installationNamespaceK8s.Core().V1().Secrets(),
controllerlib.WithInformer,
c.ServingCertRenewBefore,
),
singletonWorker,
).
// Kube cert agent controllers are responsible for finding the cluster's signing keys and keeping them
// up to date in memory, as well as reporting status on this cluster integration strategy.
WithController(
kubecertagent.NewCreaterController(
agentPodConfig,
credentialIssuerConfigLocationConfig,
clock.RealClock{},
k8sClient,
pinnipedClient,
informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
informers.installationNamespaceK8s.Core().V1().Pods(),
controllerlib.WithInformer,
servingCertRenewBefore,
),
singletonWorker,
).
WithController(
kubecertagent.NewAnnotaterController(
agentPodConfig,
credentialIssuerConfigLocationConfig,
clock.RealClock{},
k8sClient,
pinnipedClient,
informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
informers.installationNamespaceK8s.Core().V1().Pods(),
controllerlib.WithInformer,
),
singletonWorker,
).
WithController(
kubecertagent.NewExecerController(
credentialIssuerConfigLocationConfig,
c.DynamicSigningCertProvider,
kubecertagent.NewPodCommandExecutor(kubeConfig, k8sClient),
pinnipedClient,
clock.RealClock{},
informers.installationNamespaceK8s.Core().V1().Pods(),
controllerlib.WithInformer,
),
singletonWorker,
).
WithController(
kubecertagent.NewDeleterController(
agentPodConfig,
k8sClient,
informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
informers.installationNamespaceK8s.Core().V1().Pods(),
controllerlib.WithInformer,
),
singletonWorker,
).
// The cache filler controllers are responsible for keeping an in-memory representation of active
// IDPs up to date.
WithController(
webhookcachefiller.New(
idpCache,
installationNamespacePinnipedInformers.IDP().V1alpha1().WebhookIdentityProviders(),
c.IDPCache,
informers.installationNamespacePinniped.IDP().V1alpha1().WebhookIdentityProviders(),
klogr.New(),
),
singletonWorker,
).
WithController(
webhookcachecleaner.New(
idpCache,
installationNamespacePinnipedInformers.IDP().V1alpha1().WebhookIdentityProviders(),
c.IDPCache,
informers.installationNamespacePinniped.IDP().V1alpha1().WebhookIdentityProviders(),
klogr.New(),
),
singletonWorker,
@ -132,31 +239,29 @@ func PrepareControllers(
// Return a function which starts the informers and controllers.
return func(ctx context.Context) {
kubePublicNamespaceK8sInformers.Start(ctx.Done())
installationNamespaceK8sInformers.Start(ctx.Done())
installationNamespacePinnipedInformers.Start(ctx.Done())
kubePublicNamespaceK8sInformers.WaitForCacheSync(ctx.Done())
installationNamespaceK8sInformers.WaitForCacheSync(ctx.Done())
installationNamespacePinnipedInformers.WaitForCacheSync(ctx.Done())
informers.startAndWaitForSync(ctx)
go controllerManager.Start(ctx)
}, nil
}
// Create the rest config that will be used by the clients for the controllers.
func createConfig() (*rest.Config, error) {
// Load the Kubernetes client configuration.
kubeConfig, err := restclient.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
}
return kubeConfig, nil
}
// Create the k8s clients that will be used by the controllers.
func createClients() (
func createClients(kubeConfig *rest.Config) (
k8sClient *kubernetes.Clientset,
aggregatorClient *aggregatorclient.Clientset,
pinnipedClient *pinnipedclientset.Clientset,
err error,
) {
// Load the Kubernetes client configuration.
kubeConfig, err := restclient.InClusterConfig()
if err != nil {
return nil, nil, nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
}
// explicitly use protobuf when talking to built-in kube APIs
protoKubeConfig := createProtoKubeConfig(kubeConfig)
@ -184,32 +289,53 @@ func createClients() (
return
}
type informers struct {
kubePublicNamespaceK8s k8sinformers.SharedInformerFactory
kubeSystemNamespaceK8s k8sinformers.SharedInformerFactory
installationNamespaceK8s k8sinformers.SharedInformerFactory
installationNamespacePinniped pinnipedinformers.SharedInformerFactory
}
// Create the informers that will be used by the controllers.
func createInformers(
serverInstallationNamespace string,
k8sClient *kubernetes.Clientset,
pinnipedClient *pinnipedclientset.Clientset,
) (
kubePublicNamespaceK8sInformers k8sinformers.SharedInformerFactory,
installationNamespaceK8sInformers k8sinformers.SharedInformerFactory,
installationNamespacePinnipedInformers pinnipedinformers.SharedInformerFactory,
) {
kubePublicNamespaceK8sInformers = k8sinformers.NewSharedInformerFactoryWithOptions(
k8sClient,
defaultResyncInterval,
k8sinformers.WithNamespace(issuerconfig.ClusterInfoNamespace),
)
installationNamespaceK8sInformers = k8sinformers.NewSharedInformerFactoryWithOptions(
k8sClient,
defaultResyncInterval,
k8sinformers.WithNamespace(serverInstallationNamespace),
)
installationNamespacePinnipedInformers = pinnipedinformers.NewSharedInformerFactoryWithOptions(
pinnipedClient,
defaultResyncInterval,
pinnipedinformers.WithNamespace(serverInstallationNamespace),
)
return
) *informers {
return &informers{
kubePublicNamespaceK8s: k8sinformers.NewSharedInformerFactoryWithOptions(
k8sClient,
defaultResyncInterval,
k8sinformers.WithNamespace(issuerconfig.ClusterInfoNamespace),
),
kubeSystemNamespaceK8s: k8sinformers.NewSharedInformerFactoryWithOptions(
k8sClient,
defaultResyncInterval,
k8sinformers.WithNamespace(kubecertagent.ControllerManagerNamespace),
),
installationNamespaceK8s: k8sinformers.NewSharedInformerFactoryWithOptions(
k8sClient,
defaultResyncInterval,
k8sinformers.WithNamespace(serverInstallationNamespace),
),
installationNamespacePinniped: pinnipedinformers.NewSharedInformerFactoryWithOptions(
pinnipedClient,
defaultResyncInterval,
pinnipedinformers.WithNamespace(serverInstallationNamespace),
),
}
}
func (i *informers) startAndWaitForSync(ctx context.Context) {
i.kubePublicNamespaceK8s.Start(ctx.Done())
i.kubeSystemNamespaceK8s.Start(ctx.Done())
i.installationNamespaceK8s.Start(ctx.Done())
i.installationNamespacePinniped.Start(ctx.Done())
i.kubePublicNamespaceK8s.WaitForCacheSync(ctx.Done())
i.kubeSystemNamespaceK8s.WaitForCacheSync(ctx.Done())
i.installationNamespaceK8s.WaitForCacheSync(ctx.Done())
i.installationNamespacePinniped.WaitForCacheSync(ctx.Done())
}
// Returns a copy of the input config with the ContentConfig set to use protobuf.

View File

@ -0,0 +1,6 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package dynamiccert provides a simple way of communicating a dynamically updating PEM-encoded
// certificate and key.
package dynamiccert

View File

@ -1,7 +1,7 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package provider
package dynamiccert
import (
"sync"
@ -9,33 +9,36 @@ import (
"k8s.io/apiserver/pkg/server/dynamiccertificates"
)
type DynamicTLSServingCertProvider interface {
// Provider provides a getter, CurrentCertKeyContent(), and a setter, Set(), for a PEM-formatted
// certificate and matching key.
type Provider interface {
dynamiccertificates.CertKeyContentProvider
Set(certPEM, keyPEM []byte)
}
type dynamicTLSServingCertProvider struct {
type provider struct {
certPEM []byte
keyPEM []byte
mutex sync.RWMutex
}
func NewDynamicTLSServingCertProvider() DynamicTLSServingCertProvider {
return &dynamicTLSServingCertProvider{}
// New returns an empty Provider. The returned Provider is thread-safe.
func New() Provider {
return &provider{}
}
func (p *dynamicTLSServingCertProvider) Set(certPEM, keyPEM []byte) {
func (p *provider) Set(certPEM, keyPEM []byte) {
p.mutex.Lock() // acquire a write lock
defer p.mutex.Unlock()
p.certPEM = certPEM
p.keyPEM = keyPEM
}
func (p *dynamicTLSServingCertProvider) Name() string {
return "DynamicTLSServingCertProvider"
func (p *provider) Name() string {
return "DynamicCertProvider"
}
func (p *dynamicTLSServingCertProvider) CurrentCertKeyContent() (cert []byte, key []byte) {
func (p *provider) CurrentCertKeyContent() (cert []byte, key []byte) {
p.mutex.RLock() // acquire a read lock
defer p.mutex.RUnlock()
return p.certPEM, p.keyPEM
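
As context for the rename, here is a minimal usage sketch of the new dynamiccert package (not part of this diff; the PEM values are placeholders and the wiring note below is an assumption about how the provider is consumed):

// A hedged illustration of the Provider contract: Set may be called by a controller at any
// time, and CurrentCertKeyContent can be read concurrently by the TLS server.
package main

import (
	"fmt"

	"go.pinniped.dev/internal/dynamiccert"
)

func main() {
	certProvider := dynamiccert.New() // starts empty

	// A controller calls Set whenever it issues or rotates the serving certificate.
	certProvider.Set(
		[]byte("-----BEGIN CERTIFICATE-----\n...placeholder...\n-----END CERTIFICATE-----"),
		[]byte("-----BEGIN PRIVATE KEY-----\n...placeholder...\n-----END PRIVATE KEY-----"),
	)

	// Readers take a read lock and get whatever pair is current.
	certPEM, keyPEM := certProvider.CurrentCertKeyContent()
	fmt.Printf("serving cert is %d bytes, key is %d bytes\n", len(certPEM), len(keyPEM))
}

Because Provider embeds dynamiccertificates.CertKeyContentProvider, the same object can be handed to the aggregated API server's secure-serving options (for example its GeneratedCert field), so a rotation performed via Set is served without restarting the server.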

View File

@ -11,24 +11,17 @@ import (
"time"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
genericapiserver "k8s.io/apiserver/pkg/server"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/klog/v2"
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
loginv1alpha1 "go.pinniped.dev/generated/1.19/apis/login/v1alpha1"
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
"go.pinniped.dev/internal/apiserver"
"go.pinniped.dev/internal/certauthority/kubecertauthority"
"go.pinniped.dev/internal/certauthority/dynamiccertauthority"
"go.pinniped.dev/internal/controller/identityprovider/idpcache"
"go.pinniped.dev/internal/controller/issuerconfig"
"go.pinniped.dev/internal/controllermanager"
"go.pinniped.dev/internal/downward"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/provider"
"go.pinniped.dev/internal/registry/credentialrequest"
"go.pinniped.dev/pkg/config"
)
@ -111,13 +104,6 @@ func (a *App) runServer(ctx context.Context) error {
}
serverInstallationNamespace := podInfo.Namespace
// Load the Kubernetes cluster signing CA.
k8sClusterCA, shutdownCA, err := getClusterCASigner(ctx, serverInstallationNamespace, cfg.NamesConfig.CredentialIssuerConfig)
if err != nil {
return err
}
defer shutdownCA()
// Initialize the cache of active identity providers.
idpCache := idpcache.New()
@ -126,18 +112,26 @@ func (a *App) runServer(ctx context.Context) error {
// is stored in a k8s Secret. Therefore it also effectively acts as
// an in-memory cache of what is stored in the k8s Secret, helping to
// keep incoming requests fast.
dynamicCertProvider := provider.NewDynamicTLSServingCertProvider()
dynamicServingCertProvider := dynamiccert.New()
// This cert provider will be used to provide a signing key to the
// cert issuer used to issue certs to Pinniped clients wishing to login.
dynamicSigningCertProvider := dynamiccert.New()
// Prepare to start the controllers, but defer actually starting them until the
// post start hook of the aggregated API server.
startControllersFunc, err := controllermanager.PrepareControllers(
serverInstallationNamespace,
cfg.NamesConfig,
cfg.DiscoveryInfo.URL,
dynamicCertProvider,
time.Duration(*cfg.APIConfig.ServingCertificateConfig.DurationSeconds)*time.Second,
time.Duration(*cfg.APIConfig.ServingCertificateConfig.RenewBeforeSeconds)*time.Second,
idpCache,
&controllermanager.Config{
ServerInstallationNamespace: serverInstallationNamespace,
NamesConfig: &cfg.NamesConfig,
KubeCertAgentConfig: &cfg.KubeCertAgentConfig,
DiscoveryURLOverride: cfg.DiscoveryInfo.URL,
DynamicServingCertProvider: dynamicServingCertProvider,
DynamicSigningCertProvider: dynamicSigningCertProvider,
ServingCertDuration: time.Duration(*cfg.APIConfig.ServingCertificateConfig.DurationSeconds) * time.Second,
ServingCertRenewBefore: time.Duration(*cfg.APIConfig.ServingCertificateConfig.RenewBeforeSeconds) * time.Second,
IDPCache: idpCache,
},
)
if err != nil {
return fmt.Errorf("could not prepare controllers: %w", err)
@ -145,9 +139,9 @@ func (a *App) runServer(ctx context.Context) error {
// Get the aggregated API server config.
aggregatedAPIServerConfig, err := getAggregatedAPIServerConfig(
dynamicCertProvider,
dynamicServingCertProvider,
idpCache,
k8sClusterCA,
dynamiccertauthority.New(dynamicSigningCertProvider),
startControllersFunc,
)
if err != nil {
@ -164,87 +158,9 @@ func (a *App) runServer(ctx context.Context) error {
return server.GenericAPIServer.PrepareRun().Run(ctx.Done())
}
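
The "post start hook" mentioned above is the standard k8s.io/apiserver mechanism. As a rough, hypothetical sketch of how the function returned by PrepareControllers could be attached to it (the hook name and helper below are illustrative, not the exact wiring in this change):

package example

import (
	"context"

	genericapiserver "k8s.io/apiserver/pkg/server"
)

// addControllerStartHook registers startControllers so that it only runs once the aggregated
// API server has started, and so its context is cancelled when the server's stop channel closes.
func addControllerStartHook(server *genericapiserver.GenericAPIServer, startControllers func(context.Context)) {
	server.AddPostStartHookOrDie("start-controllers", func(hookCtx genericapiserver.PostStartHookContext) error {
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			defer cancel()
			<-hookCtx.StopCh // server is shutting down
		}()
		go startControllers(ctx)
		return nil
	})
}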
func getClusterCASigner(
ctx context.Context, serverInstallationNamespace string,
credentialIssuerConfigResourceName string,
) (credentialrequest.CertIssuer, kubecertauthority.ShutdownFunc, error) {
// Load the Kubernetes client configuration.
kubeConfig, err := restclient.InClusterConfig()
if err != nil {
return nil, nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
}
// Connect to the core Kubernetes API.
kubeClient, err := kubernetes.NewForConfig(kubeConfig)
if err != nil {
return nil, nil, fmt.Errorf("could not initialize Kubernetes client: %w", err)
}
// Connect to the pinniped API.
pinnipedClient, err := pinnipedclientset.NewForConfig(kubeConfig)
if err != nil {
return nil, nil, fmt.Errorf("could not initialize pinniped client: %w", err)
}
// Make a clock tick that triggers a periodic refresh.
ticker := time.NewTicker(5 * time.Minute)
// Make a CA which uses the Kubernetes cluster API server's signing certs.
k8sClusterCA, shutdownCA := kubecertauthority.New(
kubeClient,
kubecertauthority.NewPodCommandExecutor(kubeConfig, kubeClient),
ticker.C,
func() { // success callback
err = issuerconfig.CreateOrUpdateCredentialIssuerConfig(
ctx,
serverInstallationNamespace,
credentialIssuerConfigResourceName,
pinnipedClient,
func(configToUpdate *configv1alpha1.CredentialIssuerConfig) {
configToUpdate.Status.Strategies = []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
Reason: configv1alpha1.FetchedKeyStrategyReason,
Message: "Key was fetched successfully",
LastUpdateTime: metav1.Now(),
},
}
},
)
if err != nil {
klog.Errorf("error performing create or update on CredentialIssuerConfig to add strategy success: %s", err.Error())
}
},
func(err error) { // error callback
if updateErr := issuerconfig.CreateOrUpdateCredentialIssuerConfig(
ctx,
serverInstallationNamespace,
credentialIssuerConfigResourceName,
pinnipedClient,
func(configToUpdate *configv1alpha1.CredentialIssuerConfig) {
configToUpdate.Status.Strategies = []configv1alpha1.CredentialIssuerConfigStrategy{
{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: err.Error(),
LastUpdateTime: metav1.Now(),
},
}
},
); updateErr != nil {
klog.Errorf("error performing create or update on CredentialIssuerConfig to add strategy error: %s", updateErr.Error())
}
},
)
return k8sClusterCA, func() { shutdownCA(); ticker.Stop() }, nil
}
// Create a configuration for the aggregated API server.
func getAggregatedAPIServerConfig(
dynamicCertProvider provider.DynamicTLSServingCertProvider,
dynamicCertProvider dynamiccert.Provider,
authenticator credentialrequest.TokenCredentialRequestAuthenticator,
issuer credentialrequest.CertIssuer,
startControllersPostStartHook func(context.Context),

View File

@ -75,6 +75,12 @@ func (v *ValidCert) RequireMatchesPrivateKey(keyPEM string) {
require.NoError(v.t, err)
}
// RequireCommonName asserts that the certificate contains the provided commonName.
func (v *ValidCert) RequireCommonName(commonName string) {
v.t.Helper()
require.Equal(v.t, commonName, v.parsed.Subject.CommonName)
}
// CreateCertificate creates a certificate with the provided time bounds, and
// returns the PEM representation of the certificate.
//

View File

@ -5,9 +5,10 @@ package api
// Config contains knobs to setup an instance of Pinniped.
type Config struct {
DiscoveryInfo DiscoveryInfoSpec `json:"discovery"`
APIConfig APIConfigSpec `json:"api"`
NamesConfig NamesConfigSpec `json:"names"`
DiscoveryInfo DiscoveryInfoSpec `json:"discovery"`
APIConfig APIConfigSpec `json:"api"`
NamesConfig NamesConfigSpec `json:"names"`
KubeCertAgentConfig KubeCertAgentSpec `json:"kubeCertAgent"`
}
// DiscoveryInfoSpec contains configuration knobs specific to
@ -49,3 +50,15 @@ type ServingCertificateConfigSpec struct {
// seconds (about 9 months).
RenewBeforeSeconds *int64 `json:"renewBeforeSeconds,omitempty"`
}
type KubeCertAgentSpec struct {
// NamePrefix is the prefix of the name of the kube-cert-agent pods. For example, if this field is
// set to "some-prefix-", then the name of the pods will look like "some-prefix-blah". The default
// for this value is "pinniped-kube-cert-agent-".
NamePrefix *string `json:"namePrefix,omitempty"`
// Image is the container image that will be used by the kube-cert-agent pod. The container image
// should contain at least 2 binaries: /bin/sleep and cat (somewhere on the $PATH). The default
// for this value is "debian:latest".
Image *string `json:"image"`
}
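
To make these two knobs concrete, here is a hedged sketch of roughly the kind of pod the kube-cert-agent controllers could create from them. The GenerateName usage, container name, and command are assumptions for illustration; the label value does match the selector used by the integration test later in this change:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// agentPodSketch shows how the configured name prefix and image might surface in a pod spec.
// The defaults documented above are used as example values in the comments.
func agentPodSketch(namePrefix, image string) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: namePrefix, // e.g. "pinniped-kube-cert-agent-" yields "pinniped-kube-cert-agent-xxxxx"
			Labels:       map[string]string{"kube-cert-agent.pinniped.dev": "true"},
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "sleeper",
				Image: image, // e.g. "debian:latest"
				// /bin/sleep keeps the pod alive so that signing key material can later be read
				// out of it (which is why the image must also provide cat on the $PATH).
				Command: []string{"/bin/sleep", "infinity"},
			}},
		},
	}
}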

View File

@ -40,6 +40,7 @@ func FromPath(path string) (*api.Config, error) {
}
maybeSetAPIDefaults(&config.APIConfig)
maybeSetKubeCertAgentDefaults(&config.KubeCertAgentConfig)
if err := validateAPI(&config.APIConfig); err != nil {
return nil, fmt.Errorf("validate api: %w", err)
@ -62,6 +63,16 @@ func maybeSetAPIDefaults(apiConfig *api.APIConfigSpec) {
}
}
func maybeSetKubeCertAgentDefaults(cfg *api.KubeCertAgentSpec) {
if cfg.NamePrefix == nil {
cfg.NamePrefix = stringPtr("pinniped-kube-cert-agent-")
}
if cfg.Image == nil {
cfg.Image = stringPtr("debian:latest")
}
}
func validateNames(names *api.NamesConfigSpec) error {
missingNames := []string{}
if names == nil {
@ -98,3 +109,7 @@ func validateAPI(apiConfig *api.APIConfigSpec) error {
func int64Ptr(i int64) *int64 {
return &i
}
func stringPtr(s string) *string {
return &s
}

View File

@ -35,6 +35,10 @@ func TestFromPath(t *testing.T) {
servingCertificateSecret: pinniped-api-tls-serving-certificate
credentialIssuerConfig: pinniped-config
apiService: pinniped-api
kubeCertAgentPrefix: kube-cert-agent-prefix
KubeCertAgent:
namePrefix: kube-cert-agent-name-prefix-
image: kube-cert-agent-image
`),
wantConfig: &api.Config{
DiscoveryInfo: api.DiscoveryInfoSpec{
@ -51,6 +55,10 @@ func TestFromPath(t *testing.T) {
CredentialIssuerConfig: "pinniped-config",
APIService: "pinniped-api",
},
KubeCertAgentConfig: api.KubeCertAgentSpec{
NamePrefix: stringPtr("kube-cert-agent-name-prefix-"),
Image: stringPtr("kube-cert-agent-image"),
},
},
},
{
@ -77,6 +85,10 @@ func TestFromPath(t *testing.T) {
CredentialIssuerConfig: "pinniped-config",
APIService: "pinniped-api",
},
KubeCertAgentConfig: api.KubeCertAgentSpec{
NamePrefix: stringPtr("pinniped-kube-cert-agent-"),
Image: stringPtr("debian:latest"),
},
},
},
{
@ -187,7 +199,3 @@ func TestFromPath(t *testing.T) {
})
}
}
func stringPtr(s string) *string {
return &s
}

View File

@ -28,7 +28,7 @@ func TestCLI(t *testing.T) {
)
// Create a test webhook configuration to use with the CLI.
ctx, cancelFunc := context.WithTimeout(context.Background(), 30*time.Second)
ctx, cancelFunc := context.WithTimeout(context.Background(), 4*time.Minute)
defer cancelFunc()
idp := library.CreateTestWebhookIDP(ctx, t)

View File

@ -169,9 +169,8 @@ func addTestClusterGroupCanViewEverythingRoleBinding(ctx context.Context, t *tes
func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
if err != nil {
// "404 not found" errors are acceptable, but others would be unexpected
statusError, isStatus := err.(*errors.StatusError)
require.True(t, isStatus)
require.True(t, isStatus, "Only StatusNotFound error would be acceptable, but error was: ", err.Error())
require.Equal(t, http.StatusNotFound, int(statusError.Status().Code))
_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})

View File

@ -9,10 +9,8 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
"go.pinniped.dev/test/library"
@ -50,7 +48,14 @@ func TestCredentialIssuerConfig(t *testing.T) {
require.Equal(t, configv1alpha1.FetchedKeyStrategyReason, actualStatusStrategy.Reason)
require.Equal(t, "Key was fetched successfully", actualStatusStrategy.Message)
// Verify the published kube config info.
require.Equal(t, expectedStatusKubeConfigInfo(config), actualStatusKubeConfigInfo)
require.Equal(
t,
&configv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: config.Host,
CertificateAuthorityData: base64.StdEncoding.EncodeToString(config.TLSClientConfig.CAData),
},
actualStatusKubeConfigInfo,
)
} else {
require.Equal(t, configv1alpha1.ErrorStrategyStatus, actualStatusStrategy.Status)
require.Equal(t, configv1alpha1.CouldNotFetchKeyStrategyReason, actualStatusStrategy.Reason)
@ -63,52 +68,4 @@ func TestCredentialIssuerConfig(t *testing.T) {
require.WithinDuration(t, time.Now(), actualStatusStrategy.LastUpdateTime.Local(), 10*time.Minute)
})
t.Run("reconciling CredentialIssuerConfig", func(t *testing.T) {
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
existingConfig, err := client.
ConfigV1alpha1().
CredentialIssuerConfigs(namespaceName).
Get(ctx, "pinniped-config", metav1.GetOptions{})
require.NoError(t, err)
require.Len(t, existingConfig.Status.Strategies, 1)
initialStrategy := existingConfig.Status.Strategies[0]
// Mutate the existing object. Don't delete it because that would mess up its `Status.Strategies` array,
// since the reconciling controller is not currently responsible for that field.
updatedServerValue := "https://junk"
existingConfig.Status.KubeConfigInfo.Server = updatedServerValue
updatedConfig, err := client.
ConfigV1alpha1().
CredentialIssuerConfigs(namespaceName).
Update(ctx, existingConfig, metav1.UpdateOptions{})
require.NoError(t, err)
require.Equal(t, updatedServerValue, updatedConfig.Status.KubeConfigInfo.Server)
// Expect that the object's mutated field is set back to what matches its source of truth by the controller.
var actualCredentialIssuerConfig *configv1alpha1.CredentialIssuerConfig
var configChangesServerField = func() bool {
actualCredentialIssuerConfig, err = client.
ConfigV1alpha1().
CredentialIssuerConfigs(namespaceName).
Get(ctx, "pinniped-config", metav1.GetOptions{})
return err == nil && actualCredentialIssuerConfig.Status.KubeConfigInfo.Server != updatedServerValue
}
assert.Eventually(t, configChangesServerField, 10*time.Second, 100*time.Millisecond)
require.NoError(t, err) // prints out the error and stops the test in case of failure
actualStatusKubeConfigInfo := actualCredentialIssuerConfig.Status.KubeConfigInfo
require.Equal(t, expectedStatusKubeConfigInfo(config), actualStatusKubeConfigInfo)
// The strategies should not have changed during reconciliation.
require.Len(t, actualCredentialIssuerConfig.Status.Strategies, 1)
require.Equal(t, initialStrategy, actualCredentialIssuerConfig.Status.Strategies[0])
})
}
func expectedStatusKubeConfigInfo(config *rest.Config) *configv1alpha1.CredentialIssuerConfigKubeConfigInfo {
return &configv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: config.Host,
CertificateAuthorityData: base64.StdEncoding.EncodeToString(config.TLSClientConfig.CAData),
}
}

View File

@ -46,7 +46,7 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
expectedTestUserGroups := strings.Split(
strings.ReplaceAll(library.GetEnv(t, "PINNIPED_TEST_USER_GROUPS"), " ", ""), ",",
)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
defer cancel()
testWebhook := library.CreateTestWebhookIDP(ctx, t)

View File

@ -0,0 +1,121 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package integration
import (
"context"
"fmt"
"sort"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
"go.pinniped.dev/test/library"
)
const (
kubeCertAgentLabelSelector = "kube-cert-agent.pinniped.dev=true"
)
func TestKubeCertAgent(t *testing.T) {
library.SkipUnlessIntegration(t)
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
kubeClient := library.NewClientset(t)
// Get the current number of kube-cert-agent pods.
//
// We can pretty safely assert there should be at least 1, since there should be a
// kube-cert-agent pod per kube-controller-manager pod, and there should probably be at least
// 1 kube-controller-manager for this to be a working kube API.
originalAgentPods, err := kubeClient.CoreV1().Pods(namespaceName).List(ctx, metav1.ListOptions{
LabelSelector: kubeCertAgentLabelSelector,
})
require.NoError(t, err)
require.NotEmpty(t, originalAgentPods.Items)
sortPods(originalAgentPods)
agentPodsReconciled := func() bool {
var currentAgentPods *corev1.PodList
currentAgentPods, err = kubeClient.CoreV1().Pods(namespaceName).List(ctx, metav1.ListOptions{
LabelSelector: kubeCertAgentLabelSelector,
})
if err != nil {
return false
}
if len(originalAgentPods.Items) != len(currentAgentPods.Items) {
err = fmt.Errorf(
"original agent pod len != current agent pod len: %s",
diff.ObjectDiff(originalAgentPods.Items, currentAgentPods.Items),
)
return false
}
sortPods(currentAgentPods)
for i := range originalAgentPods.Items {
if !equality.Semantic.DeepEqual(
originalAgentPods.Items[i].Spec,
currentAgentPods.Items[i].Spec,
) {
err = fmt.Errorf(
"original agent pod != current agent pod: %s",
diff.ObjectDiff(originalAgentPods.Items[i].Spec, currentAgentPods.Items[i].Spec),
)
return false
}
}
return true
}
t.Run("reconcile on update", func(t *testing.T) {
// Update the tolerations of the first pod. The controller should see it, and flip it back.
//
// Note that we update the toleration field here because it is the only field, currently, that
// 1) we are allowed to update on a running pod AND 2) the kube-cert-agent controllers care
// about.
updatedAgentPod := originalAgentPods.Items[0].DeepCopy()
updatedAgentPod.Spec.Tolerations = append(
updatedAgentPod.Spec.Tolerations,
corev1.Toleration{Key: "fake-toleration"},
)
_, err = kubeClient.CoreV1().Pods(namespaceName).Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
require.NoError(t, err)
// Make sure the original pods come back.
assert.Eventually(t, agentPodsReconciled, 10*time.Second, 250*time.Millisecond)
require.NoError(t, err)
})
t.Run("reconcile on delete", func(t *testing.T) {
// Delete the first pod. The controller should see it, and flip it back.
err = kubeClient.
CoreV1().
Pods(namespaceName).
Delete(ctx, originalAgentPods.Items[0].Name, metav1.DeleteOptions{})
require.NoError(t, err)
// Make sure the original pods come back.
assert.Eventually(t, agentPodsReconciled, 10*time.Second, 250*time.Millisecond)
require.NoError(t, err)
})
}
func sortPods(pods *corev1.PodList) {
sort.Slice(pods.Items, func(i, j int) bool {
return pods.Items[i].Name < pods.Items[j].Name
})
}