Merge branch 'main' into oidc_password_grant
commit 3077034b2d
@@ -153,6 +153,9 @@ rules:
 - apiGroups: [ "" ]
   resources: [ configmaps ]
   verbs: [ list, get, watch ]
+- apiGroups: [ coordination.k8s.io ]
+  resources: [ leases ]
+  verbs: [ create, get, update ]
 ---
 kind: RoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
@@ -48,6 +48,9 @@ rules:
 - apiGroups: [apps]
   resources: [replicasets,deployments]
   verbs: [get]
+- apiGroups: [ coordination.k8s.io ]
+  resources: [ leases ]
+  verbs: [ create, get, update ]
 ---
 kind: RoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
go.mod (17 lines changed)
@@ -25,6 +25,7 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.7.0
 	github.com/tdewolff/minify/v2 v2.9.21
+	go.uber.org/atomic v1.7.0
 	golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
 	golang.org/x/net v0.0.0-20210520170846-37e1c6afe023
 	golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602
@@ -42,19 +43,3 @@ require (
 	k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
 	sigs.k8s.io/yaml v1.2.0
 )
-
-// Workaround a broken module version (see https://github.com/oleiade/reflections/issues/14).
-// We need this until none of our deps tries to pull in v1.0.0, otherwise some tools like
-// Dependabot will fail on our module.
-replace github.com/oleiade/reflections v1.0.0 => github.com/oleiade/reflections v1.0.1
-
-// We were never vulnerable to CVE-2020-26160 but this avoids future issues
-// This fork is not particularly better though:
-// https://github.com/form3tech-oss/jwt-go/issues/7
-// We use the SHA of github.com/form3tech-oss/jwt-go@v3.2.2 to get around "used for two different module paths"
-// https://golang.org/issues/26904
-replace github.com/dgrijalva/jwt-go v3.2.0+incompatible => github.com/form3tech-oss/jwt-go v0.0.0-20200915135329-9162a5abdbc0
-
-// Pin a gRPC module that's only used in some tests.
-// This is required because sometime after v1.29.1, they moved this package into a separate module.
-replace google.golang.org/grpc/examples => ./hack/dependencyhacks/grpcexamples/
go.sum (4 lines changed)
@@ -182,6 +182,7 @@ github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXh
 github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
 github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI=
 github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -224,7 +225,6 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/form3tech-oss/jwt-go v0.0.0-20200915135329-9162a5abdbc0/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@@ -930,6 +930,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
 github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/oleiade/reflections v1.0.0/go.mod h1:RbATFBbKYkVdqmSFtx13Bb/tVhR0lgOBXunWTZKeL4w=
 github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM=
 github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -1793,6 +1794,7 @@ google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
 google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
 google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
 google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc/examples v0.0.0-20210304020650-930c79186c99/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1,3 +0,0 @@
-module google.golang.org/grpc/examples
-
-go 1.14
@@ -46,7 +46,7 @@ function with_modules() {
   env_vars="KUBE_CACHE_MUTATION_DETECTOR=${kube_cache_mutation_detector} KUBE_PANIC_WATCH_DECODE_ERROR=${kube_panic_watch_decode_error}"
 
   pushd "${ROOT}" >/dev/null
-  for mod_file in $(find . -maxdepth 4 -not -path "./generated/*" -not -path "./hack/*" -name go.mod | sort); do
+  for mod_file in $(find . -maxdepth 4 -not -path "./generated/*" -name go.mod | sort); do
     mod_dir="$(dirname "${mod_file}")"
     (
       echo "=> "
@@ -102,7 +102,7 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
 
 	// Wire up the impersonation proxy signer CA as another valid authenticator for client cert auth,
 	// along with the Kube API server's CA.
-	// Note: any changes to the the Authentication stack need to be kept in sync with any assumptions made
+	// Note: any changes to the Authentication stack need to be kept in sync with any assumptions made
 	// by getTransportForUser, especially if we ever update the TCR API to start returning bearer tokens.
 	kubeClientUnsafeForProxying, err := kubeclient.New(clientOpts...)
 	if err != nil {
@@ -297,12 +297,7 @@ func TestExpirerControllerSync(t *testing.T) {
 
 			if test.wantDelete {
 				require.Len(t, *opts, 1)
-				require.Equal(t, metav1.DeleteOptions{
-					Preconditions: &metav1.Preconditions{
-						UID:             &testUID,
-						ResourceVersion: &testRV,
-					},
-				}, (*opts)[0])
+				require.Equal(t, testutil.NewPreconditions(testUID, testRV), (*opts)[0])
 			} else {
 				require.Len(t, *opts, 0)
 			}
@@ -29,14 +29,12 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	kubernetesfake "k8s.io/client-go/kubernetes/fake"
 	coretesting "k8s.io/client-go/testing"
-	"k8s.io/utils/pointer"
 
 	"go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
 	pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
@@ -1032,13 +1030,7 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 				// validate that we set delete preconditions correctly
 				r.NotEmpty(*deleteOptions)
 				for _, opt := range *deleteOptions {
-					uid := types.UID("uid-1234")
-					r.Equal(metav1.DeleteOptions{
-						Preconditions: &metav1.Preconditions{
-							UID:             &uid,
-							ResourceVersion: pointer.String("rv-5678"),
-						},
-					}, opt)
+					r.Equal(testutil.NewPreconditions("uid-1234", "rv-5678"), opt)
 				}
 			}
 
@@ -40,12 +40,29 @@ func NewLegacyPodCleanerController(
 		controllerlib.Config{
 			Name: "legacy-pod-cleaner-controller",
 			Syncer: controllerlib.SyncFunc(func(ctx controllerlib.Context) error {
-				if err := client.Kubernetes.CoreV1().Pods(ctx.Key.Namespace).Delete(ctx.Context, ctx.Key.Name, metav1.DeleteOptions{}); err != nil {
+				podClient := client.Kubernetes.CoreV1().Pods(ctx.Key.Namespace)
+
+				// avoid blind writes to the API
+				agentPod, err := podClient.Get(ctx.Context, ctx.Key.Name, metav1.GetOptions{})
+				if err != nil {
+					if k8serrors.IsNotFound(err) {
+						return nil
+					}
+					return fmt.Errorf("could not get legacy agent pod: %w", err)
+				}
+
+				if err := podClient.Delete(ctx.Context, ctx.Key.Name, metav1.DeleteOptions{
+					Preconditions: &metav1.Preconditions{
+						UID:             &agentPod.UID,
+						ResourceVersion: &agentPod.ResourceVersion,
+					},
+				}); err != nil {
 					if k8serrors.IsNotFound(err) {
 						return nil
 					}
 					return fmt.Errorf("could not delete legacy agent pod: %w", err)
 				}
 
 				log.Info("deleted legacy kube-cert-agent pod", "pod", klog.KRef(ctx.Key.Namespace, ctx.Key.Name))
 				return nil
 			}),
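The new Syncer body above fetches the pod before deleting it so that the delete can carry UID and ResourceVersion preconditions instead of being a blind write. The following is a minimal sketch of that same get-then-guarded-delete pattern using only upstream client-go (the helper name and the plain kubernetes.Interface are illustrative; this is not Pinniped's kubeclient wrapper):

package main

import (
	"context"
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deletePodExactly deletes the named pod only if it is still the exact object
// that was just read, guarding against deleting a replacement pod that happens
// to reuse the same name.
func deletePodExactly(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	pods := client.CoreV1().Pods(namespace)

	// Read first to learn the current UID and ResourceVersion (avoid blind writes).
	pod, err := pods.Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			return nil // already gone, nothing to do
		}
		return fmt.Errorf("could not get pod: %w", err)
	}

	// Delete only if the object is still the one we just observed.
	err = pods.Delete(ctx, name, metav1.DeleteOptions{
		Preconditions: &metav1.Preconditions{
			UID:             &pod.UID,
			ResourceVersion: &pod.ResourceVersion,
		},
	})
	if err != nil && !k8serrors.IsNotFound(err) {
		return fmt.Errorf("could not delete pod: %w", err)
	}
	return nil
}

If another writer has already replaced the pod, the stale UID/ResourceVersion cause the API server to refuse the delete, so only the exact object that was observed can be removed.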
@@ -20,6 +20,7 @@ import (
 
 	"go.pinniped.dev/internal/controllerlib"
 	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/testutil"
 	"go.pinniped.dev/internal/testutil/testlogger"
 )
 
@@ -28,9 +29,11 @@ func TestLegacyPodCleanerController(t *testing.T) {
 
 	legacyAgentPodWithoutExtraLabel := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "concierge",
 			Name:      "pinniped-concierge-kube-cert-agent-without-extra-label",
 			Labels:    map[string]string{"kube-cert-agent.pinniped.dev": "true"},
+			UID:             "1",
+			ResourceVersion: "2",
 		},
 		Spec:   corev1.PodSpec{},
 		Status: corev1.PodStatus{Phase: corev1.PodRunning},
@@ -40,10 +43,14 @@ func TestLegacyPodCleanerController(t *testing.T) {
 	legacyAgentPodWithExtraLabel.Name = "pinniped-concierge-kube-cert-agent-with-extra-label"
 	legacyAgentPodWithExtraLabel.Labels["extralabel"] = "labelvalue"
 	legacyAgentPodWithExtraLabel.Labels["anotherextralabel"] = "labelvalue"
+	legacyAgentPodWithExtraLabel.UID = "3"
+	legacyAgentPodWithExtraLabel.ResourceVersion = "4"
 
 	nonLegacyAgentPod := legacyAgentPodWithExtraLabel.DeepCopy()
 	nonLegacyAgentPod.Name = "pinniped-concierge-kube-cert-agent-not-legacy"
 	nonLegacyAgentPod.Labels["kube-cert-agent.pinniped.dev"] = "v2"
+	nonLegacyAgentPod.UID = "5"
+	nonLegacyAgentPod.ResourceVersion = "6"
 
 	tests := []struct {
 		name             string
@@ -52,10 +59,12 @@ func TestLegacyPodCleanerController(t *testing.T) {
 		wantDistinctErrors []string
 		wantDistinctLogs   []string
 		wantActions        []coretesting.Action
+		wantDeleteOptions  []metav1.DeleteOptions
 	}{
 		{
 			name:        "no pods",
 			wantActions: []coretesting.Action{},
+			wantDeleteOptions: []metav1.DeleteOptions{},
 		},
 		{
 			name: "mix of pods",
@@ -69,8 +78,12 @@ func TestLegacyPodCleanerController(t *testing.T) {
 				`legacy-pod-cleaner-controller "level"=0 "msg"="deleted legacy kube-cert-agent pod" "pod"={"name":"pinniped-concierge-kube-cert-agent-with-extra-label","namespace":"concierge"}`,
 			},
 			wantActions: []coretesting.Action{ // the first delete triggers the informer again, but the second invocation triggers a Not Found
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
-				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+			},
+			wantDeleteOptions: []metav1.DeleteOptions{
+				testutil.NewPreconditions("3", "4"),
 			},
 		},
 		{
@@ -89,9 +102,15 @@ func TestLegacyPodCleanerController(t *testing.T) {
 				"could not delete legacy agent pod: some delete error",
 			},
 			wantActions: []coretesting.Action{
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 			},
+			wantDeleteOptions: []metav1.DeleteOptions{
+				testutil.NewPreconditions("3", "4"),
+				testutil.NewPreconditions("3", "4"),
+			},
 		},
 		{
 			name: "fail to delete because of not found error",
@@ -107,8 +126,30 @@ func TestLegacyPodCleanerController(t *testing.T) {
 			},
 			wantDistinctErrors: []string{""},
 			wantActions: []coretesting.Action{
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
 			},
+			wantDeleteOptions: []metav1.DeleteOptions{
+				testutil.NewPreconditions("3", "4"),
+			},
+		},
+		{
+			name: "fail to delete because of not found error on get",
+			kubeObjects: []runtime.Object{
+				legacyAgentPodWithoutExtraLabel, // should not be delete (missing extra label)
+				legacyAgentPodWithExtraLabel,    // should be deleted
+				nonLegacyAgentPod,               // should not be deleted (missing legacy agent label)
+			},
+			addKubeReactions: func(clientset *kubefake.Clientset) {
+				clientset.PrependReactor("get", "*", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, k8serrors.NewNotFound(action.GetResource().GroupResource(), "")
+				})
+			},
+			wantDistinctErrors: []string{""},
+			wantActions: []coretesting.Action{
+				coretesting.NewGetAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+			},
+			wantDeleteOptions: []metav1.DeleteOptions{},
 		},
 	}
 	for _, tt := range tests {
@@ -120,6 +161,10 @@ func TestLegacyPodCleanerController(t *testing.T) {
 			if tt.addKubeReactions != nil {
 				tt.addKubeReactions(kubeClientset)
 			}
+
+			opts := &[]metav1.DeleteOptions{}
+			trackDeleteClient := testutil.NewDeleteOptionsRecorder(kubeClientset, opts)
+
 			kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
 			log := testlogger.New(t)
 			controller := NewLegacyPodCleanerController(
@@ -127,7 +172,7 @@ func TestLegacyPodCleanerController(t *testing.T) {
 					Namespace: "concierge",
 					Labels:    map[string]string{"extralabel": "labelvalue"},
 				},
-				&kubeclient.Client{Kubernetes: kubeClientset},
+				&kubeclient.Client{Kubernetes: trackDeleteClient},
 				kubeInformers.Core().V1().Pods(),
 				log,
 				controllerlib.WithMaxRetries(1),
@@ -140,6 +185,7 @@ func TestLegacyPodCleanerController(t *testing.T) {
 			assert.Equal(t, tt.wantDistinctErrors, deduplicate(errorMessages), "unexpected errors")
 			assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
 			assert.Equal(t, tt.wantActions, kubeClientset.Actions()[2:], "unexpected actions")
+			assert.Equal(t, tt.wantDeleteOptions, *opts, "unexpected delete options")
 		})
 	}
 }
||||||
|
@ -102,7 +102,12 @@ func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if garbageCollectAfterTime.Before(frozenClock.Now()) {
|
if garbageCollectAfterTime.Before(frozenClock.Now()) {
|
||||||
err = c.kubeClient.CoreV1().Secrets(secret.Namespace).Delete(ctx.Context, secret.Name, metav1.DeleteOptions{})
|
err = c.kubeClient.CoreV1().Secrets(secret.Namespace).Delete(ctx.Context, secret.Name, metav1.DeleteOptions{
|
||||||
|
Preconditions: &metav1.Preconditions{
|
||||||
|
UID: &secret.UID,
|
||||||
|
ResourceVersion: &secret.ResourceVersion,
|
||||||
|
},
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
plog.WarningErr("failed to garbage collect resource", err, logKV(secret))
|
plog.WarningErr("failed to garbage collect resource", err, logKV(secret))
|
||||||
continue
|
continue
|
||||||
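Attaching preconditions here means a Secret that was modified or recreated after the controller listed it will not be garbage collected by mistake: the API server rejects the mismatched delete and the controller simply logs a warning and moves on to the next sync. The small illustrative helper below (not part of this change) shows how such an outcome could be classified with the apimachinery error helpers, assuming a failed precondition surfaces as a Conflict error:

package main

import (
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
)

// classifyDeleteError interprets the result of a precondition-guarded delete.
func classifyDeleteError(err error) string {
	switch {
	case err == nil:
		return "deleted"
	case k8serrors.IsNotFound(err):
		return "already gone" // another writer deleted it first
	case k8serrors.IsConflict(err):
		return "preconditions failed" // UID or resourceVersion no longer matches
	default:
		return "transient failure" // log and retry on the next sync
	}
}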
@@ -18,6 +18,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/clock"
 	kubeinformers "k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
 	kubernetesfake "k8s.io/client-go/kubernetes/fake"
 	kubetesting "k8s.io/client-go/testing"
 
@@ -116,6 +117,8 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 			subject                 controllerlib.Controller
 			kubeInformerClient      *kubernetesfake.Clientset
 			kubeClient              *kubernetesfake.Clientset
+			deleteOptions           *[]metav1.DeleteOptions
+			deleteOptionsRecorder   kubernetes.Interface
 			kubeInformers           kubeinformers.SharedInformerFactory
 			cancelContext           context.Context
 			cancelContextCancelFunc context.CancelFunc
@@ -130,7 +133,7 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 			// Set this at the last second to allow for injection of server override.
 			subject = GarbageCollectorController(
 				fakeClock,
-				kubeClient,
+				deleteOptionsRecorder,
 				kubeInformers.Core().V1().Secrets(),
 				controllerlib.WithInformer,
 			)
@@ -158,6 +161,8 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 
 			kubeInformerClient = kubernetesfake.NewSimpleClientset()
 			kubeClient = kubernetesfake.NewSimpleClientset()
+			deleteOptions = &[]metav1.DeleteOptions{}
+			deleteOptionsRecorder = testutil.NewDeleteOptionsRecorder(kubeClient, deleteOptions)
 			kubeInformers = kubeinformers.NewSharedInformerFactory(kubeInformerClient, 0)
 			frozenNow = time.Now().UTC()
 			fakeClock = clock.NewFakeClock(frozenNow)
@@ -193,8 +198,10 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 			it.Before(func() {
 				firstExpiredSecret := &corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      "first expired secret",
 						Namespace: installedInNamespace,
+						UID:             "uid-123",
+						ResourceVersion: "rv-456",
 						Annotations: map[string]string{
 							"storage.pinniped.dev/garbage-collect-after": frozenNow.Add(-time.Second).Format(time.RFC3339),
 						},
@@ -204,8 +211,10 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 				r.NoError(kubeClient.Tracker().Add(firstExpiredSecret))
 				secondExpiredSecret := &corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      "second expired secret",
 						Namespace: installedInNamespace,
+						UID:             "uid-789",
+						ResourceVersion: "rv-555",
 						Annotations: map[string]string{
 							"storage.pinniped.dev/garbage-collect-after": frozenNow.Add(-2 * time.Second).Format(time.RFC3339),
 						},
@@ -237,6 +246,13 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
 					},
 					kubeClient.Actions(),
 				)
+				r.ElementsMatch(
+					[]metav1.DeleteOptions{
+						testutil.NewPreconditions("uid-123", "rv-456"),
+						testutil.NewPreconditions("uid-789", "rv-555"),
+					},
+					*deleteOptions,
+				)
 				list, err := kubeClient.CoreV1().Secrets(installedInNamespace).List(context.Background(), metav1.ListOptions{})
 				r.NoError(err)
 				r.Len(list.Items, 2)
@@ -33,6 +33,7 @@ import (
 	"go.pinniped.dev/internal/dynamiccert"
 	"go.pinniped.dev/internal/groupsuffix"
 	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/leaderelection"
 )
 
 const (
@@ -97,7 +98,7 @@ type Config struct {
 func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 	loginConciergeGroupData, identityConciergeGroupData := groupsuffix.ConciergeAggregatedGroups(c.APIGroupSuffix)
 
-	dref, _, err := deploymentref.New(c.ServerInstallationInfo)
+	dref, deployment, err := deploymentref.New(c.ServerInstallationInfo)
 	if err != nil {
 		return nil, fmt.Errorf("cannot create deployment ref: %w", err)
 	}
@@ -107,7 +108,9 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 		return nil, fmt.Errorf("cannot create API service ref: %w", err)
 	}
 
-	client, err := kubeclient.New(
+	client, leaderElector, err := leaderelection.New(
+		c.ServerInstallationInfo,
+		deployment,
 		dref,          // first try to use the deployment as an owner ref (for namespace scoped resources)
 		apiServiceRef, // fallback to our API service (for everything else we create)
 		kubeclient.WithMiddleware(groupsuffix.New(c.APIGroupSuffix)),
@@ -303,7 +306,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 	// Return a function which starts the informers and controllers.
 	return func(ctx context.Context) {
 		informers.startAndWaitForSync(ctx)
-		go controllerManager.Start(ctx)
+		go leaderElector(ctx, controllerManager.Start)
 	}, nil
 }
 
@@ -146,7 +146,7 @@ func handleOtherVerbs(
 
 	result, err := middlewareReq.mutateRequest(obj)
 	if err != nil {
-		return true, nil, err
+		return true, nil, fmt.Errorf("middleware request for %#v failed to mutate: %w", middlewareReq, err)
 	}
 
 	if !result.mutated {
@@ -231,7 +231,7 @@ func handleCreateOrUpdate(
 
 	result, err := middlewareReq.mutateRequest(obj)
 	if err != nil {
-		return true, nil, err
+		return true, nil, fmt.Errorf("middleware request for %#v failed to mutate: %w", middlewareReq, err)
 	}
 
 	if !result.mutated {
internal/leaderelection/leaderelection.go (new file, 150 lines)
@@ -0,0 +1,150 @@
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package leaderelection
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"go.uber.org/atomic"
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/leaderelection"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
+
+	"go.pinniped.dev/internal/constable"
+	"go.pinniped.dev/internal/downward"
+	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/plog"
+)
+
+const ErrNotLeader constable.Error = "write attempt rejected as client is not leader"
+
+// New returns a client that has a leader election middleware injected into it.
+// This middleware will prevent all non-read requests to the Kubernetes API when
+// the current process does not hold the leader election lock. Unlike normal
+// leader election where the process blocks until it acquires the lock, this
+// middleware approach lets the process run as normal for all read requests.
+// Another difference is that if the process acquires the lock and then loses it
+// (i.e. a failed renewal), it will not exit (i.e. restart). Instead, it will
+// simply attempt to acquire the lock again.
+//
+// The returned function is blocking and will run the leader election polling
+// logic and will coordinate lease release with the input controller starter function.
+func New(podInfo *downward.PodInfo, deployment *appsv1.Deployment, opts ...kubeclient.Option) (
+	*kubeclient.Client,
+	func(context.Context, func(context.Context)),
+	error,
+) {
+	internalClient, err := kubeclient.New(opts...)
+	if err != nil {
+		return nil, nil, fmt.Errorf("could not create internal client for leader election: %w", err)
+	}
+
+	isLeader := atomic.NewBool(false)
+
+	identity := podInfo.Name
+	leaseName := deployment.Name
+
+	leaderElectionConfig := leaderelection.LeaderElectionConfig{
+		Lock: &resourcelock.LeaseLock{
+			LeaseMeta: metav1.ObjectMeta{
+				Namespace: podInfo.Namespace,
+				Name:      leaseName,
+			},
+			Client: internalClient.Kubernetes.CoordinationV1(),
+			LockConfig: resourcelock.ResourceLockConfig{
+				Identity: identity,
+			},
+		},
+		ReleaseOnCancel: true, // semantics for correct release handled by controllersWithLeaderElector below
+		LeaseDuration:   60 * time.Second,
+		RenewDeadline:   15 * time.Second,
+		RetryPeriod:     5 * time.Second,
+		Callbacks: leaderelection.LeaderCallbacks{
+			OnStartedLeading: func(_ context.Context) {
+				plog.Debug("leader gained", "identity", identity)
+				isLeader.Store(true)
+			},
+			OnStoppedLeading: func() {
+				plog.Debug("leader lost", "identity", identity)
+				isLeader.Store(false)
+			},
+			OnNewLeader: func(newLeader string) {
+				if newLeader == identity {
+					return
+				}
+				plog.Debug("new leader elected", "newLeader", newLeader)
+			},
+		},
+		Name: leaseName,
+		// this must be set to nil because we do not want to associate /healthz with a failed
+		// leader election renewal as we do not want to exit the process if the leader changes.
+		WatchDog: nil,
+	}
+
+	// validate our config here before we rely on it being functioning below
+	if _, err := leaderelection.NewLeaderElector(leaderElectionConfig); err != nil {
+		return nil, nil, fmt.Errorf("invalid config - could not create leader elector: %w", err)
+	}
+
+	writeOnlyWhenLeader := kubeclient.MiddlewareFunc(func(_ context.Context, rt kubeclient.RoundTrip) {
+		switch rt.Verb() {
+		case kubeclient.VerbGet, kubeclient.VerbList, kubeclient.VerbWatch:
+			// reads are always allowed.
+			// note that while our pods/exec into the kube cert agent pod is a write request from the
+			// perspective of the Kube API, it is semantically a read request since no mutation occurs.
+			// we simply use it to fill a cache, and we need all pods to have a functioning cache.
+			// however, we do not need to handle it here because remotecommand.NewSPDYExecutor uses a
+			// kubeclient.Client.JSONConfig as input. since our middleware logic is only injected into
+			// the generated clientset code, this JSONConfig simply ignores this middleware all together.
+			return
+		}
+
+		if isLeader.Load() { // only perform "expensive" test for writes
+			return // we are currently the leader, all actions are permitted
+		}
+
+		rt.MutateRequest(func(_ kubeclient.Object) error {
+			return ErrNotLeader // we are not the leader, fail the write request
+		})
+	})
+
+	leaderElectionOpts := append(
+		// all middleware are always executed so this being the first middleware is not relevant
+		[]kubeclient.Option{kubeclient.WithMiddleware(writeOnlyWhenLeader)},
+		opts..., // do not mutate input slice
+	)
+
+	client, err := kubeclient.New(leaderElectionOpts...)
+	if err != nil {
+		return nil, nil, fmt.Errorf("could not create leader election client: %w", err)
+	}
+
+	controllersWithLeaderElector := func(ctx context.Context, controllers func(context.Context)) {
+		leaderElectorCtx, leaderElectorCancel := context.WithCancel(context.Background()) // purposefully detached context
+
+		go func() {
+			controllers(ctx)      // run the controllers with the global context, this blocks until the context is canceled
+			leaderElectorCancel() // once the controllers have all stopped, tell the leader elector to release the lock
+		}()
+
+		for { // run (and rerun on release) the leader elector with its own context (blocking)
+			select {
+			case <-leaderElectorCtx.Done():
+				return // keep trying to run until process exit
+
+			default:
+				// blocks while trying to acquire lease, unblocks on release.
+				// note that this creates a new leader elector on each loop to
+				// prevent any bugs from reusing that struct across elections.
+				// our config was validated above so this should never die.
+				leaderelection.RunOrDie(leaderElectorCtx, leaderElectionConfig)
+			}
+		}
+	}
+
+	return client, controllersWithLeaderElector, nil
+}
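For contrast with the middleware approach in the new file above, the underlying client-go machinery on its own is usually driven like the following minimal sketch (upstream APIs only; runWithLeaderElection and runControllers are illustrative names, and the timings simply mirror the Pinniped config above). The key design difference is that in this plain form the controllers run only while the lease is held, whereas the Pinniped code keeps every controller running and merely rejects write requests from non-leaders:

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runWithLeaderElection blocks while campaigning for a Lease-based lock and
// calls runControllers only while this process holds the lease.
func runWithLeaderElection(ctx context.Context, client kubernetes.Interface, namespace, leaseName, identity string, runControllers func(context.Context)) {
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Namespace: namespace, Name: leaseName},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: identity},
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,
		LeaseDuration:   60 * time.Second,
		RenewDeadline:   15 * time.Second,
		RetryPeriod:     5 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: runControllers,                // invoked once the lease is acquired
			OnStoppedLeading: func() { /* lease was lost */ },
		},
	})
}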
@@ -41,6 +41,7 @@ import (
 	"go.pinniped.dev/internal/downward"
 	"go.pinniped.dev/internal/groupsuffix"
 	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/leaderelection"
 	"go.pinniped.dev/internal/oidc/jwks"
 	"go.pinniped.dev/internal/oidc/provider"
 	"go.pinniped.dev/internal/oidc/provider/manager"
@@ -94,6 +95,7 @@ func startControllers(
 	pinnipedClient pinnipedclientset.Interface,
 	kubeInformers kubeinformers.SharedInformerFactory,
 	pinnipedInformers pinnipedinformers.SharedInformerFactory,
+	leaderElector func(context.Context, func(context.Context)),
 ) {
 	federationDomainInformer := pinnipedInformers.Config().V1alpha1().FederationDomains()
 	secretInformer := kubeInformers.Core().V1().Secrets()
@@ -261,7 +263,7 @@ func startControllers(
 	kubeInformers.WaitForCacheSync(ctx.Done())
 	pinnipedInformers.WaitForCacheSync(ctx.Done())
 
-	go controllerManager.Start(ctx)
+	go leaderElector(ctx, controllerManager.Start)
 }
 
 func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
@@ -275,14 +277,25 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
 		return fmt.Errorf("cannot create deployment ref: %w", err)
 	}
 
-	client, err := kubeclient.New(
+	opts := []kubeclient.Option{
 		dref,
 		kubeclient.WithMiddleware(groupsuffix.New(*cfg.APIGroupSuffix)),
+	}
+
+	client, leaderElector, err := leaderelection.New(
+		podInfo,
+		supervisorDeployment,
+		opts...,
 	)
 	if err != nil {
 		return fmt.Errorf("cannot create k8s client: %w", err)
 	}
 
+	clientWithoutLeaderElection, err := kubeclient.New(opts...)
+	if err != nil {
+		return fmt.Errorf("cannot create k8s client without leader election: %w", err)
+	}
+
 	kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
 		client.Kubernetes,
 		defaultResyncInterval,
@@ -312,7 +325,7 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
 		dynamicJWKSProvider,
 		dynamicUpstreamIDPProvider,
 		&secretCache,
-		client.Kubernetes.CoreV1().Secrets(serverInstallationNamespace),
+		clientWithoutLeaderElection.Kubernetes.CoreV1().Secrets(serverInstallationNamespace), // writes to kube storage are allowed for non-leaders
 	)
 
 	startControllers(
@@ -328,6 +341,7 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
 		client.PinnipedSupervisor,
 		kubeInformers,
 		pinnipedInformers,
+		leaderElector,
 	)
 
 	//nolint: gosec // Intentionally binding to all network interfaces.
@@ -7,6 +7,7 @@ import (
 	"context"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 )
@@ -32,10 +33,24 @@ type coreWrapper struct {
 	opts *[]metav1.DeleteOptions
 }
 
+func (c *coreWrapper) Pods(namespace string) corev1client.PodInterface {
+	return &podsWrapper{PodInterface: c.CoreV1Interface.Pods(namespace), opts: c.opts}
+}
+
 func (c *coreWrapper) Secrets(namespace string) corev1client.SecretInterface {
 	return &secretsWrapper{SecretInterface: c.CoreV1Interface.Secrets(namespace), opts: c.opts}
 }
 
+type podsWrapper struct {
+	corev1client.PodInterface
+	opts *[]metav1.DeleteOptions
+}
+
+func (s *podsWrapper) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+	*s.opts = append(*s.opts, opts)
+	return s.PodInterface.Delete(ctx, name, opts)
+}
+
 type secretsWrapper struct {
 	corev1client.SecretInterface
 	opts *[]metav1.DeleteOptions
@@ -45,3 +60,12 @@ func (s *secretsWrapper) Delete(ctx context.Context, name string, opts metav1.De
 	*s.opts = append(*s.opts, opts)
 	return s.SecretInterface.Delete(ctx, name, opts)
 }
+
+func NewPreconditions(uid types.UID, rv string) metav1.DeleteOptions {
+	return metav1.DeleteOptions{
+		Preconditions: &metav1.Preconditions{
+			UID:             &uid,
+			ResourceVersion: &rv,
+		},
+	}
+}
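The new podsWrapper and NewPreconditions helper round out this test utility so that unit tests can capture exactly which DeleteOptions a controller sent. The sketch below mirrors how the tests in this change wire things up; the test name and wiring are illustrative, and it assumes the fake clientset accepts the guarded delete without evaluating the preconditions:

package testutil_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"

	"go.pinniped.dev/internal/testutil"
)

// TestRecordsDeleteOptions shows the intended wiring: hand the recorder to the
// code under test, then assert on the DeleteOptions it captured.
func TestRecordsDeleteOptions(t *testing.T) {
	kubeClient := kubernetesfake.NewSimpleClientset(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "agent", UID: "uid-1", ResourceVersion: "rv-1"},
	})
	opts := &[]metav1.DeleteOptions{}
	recorder := testutil.NewDeleteOptionsRecorder(kubeClient, opts)

	// Here the "code under test" is just a direct delete with preconditions.
	err := recorder.CoreV1().Pods("ns").Delete(context.Background(), "agent",
		testutil.NewPreconditions("uid-1", "rv-1"))
	require.NoError(t, err)

	// The wrapper appended exactly the options that were sent.
	require.Equal(t, []metav1.DeleteOptions{testutil.NewPreconditions("uid-1", "rv-1")}, *opts)
}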
@@ -438,7 +438,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	t.Run("using and watching all the basic verbs", func(t *testing.T) {
 		parallelIfNotEKS(t)
 		// Create a namespace, because it will be easier to exercise "deletecollection" if we have a namespace.
-		namespaceName := createTestNamespace(t, adminClient)
+		namespaceName := testlib.CreateNamespace(ctx, t, "impersonation").Name
 
 		// Create and start informer to exercise the "watch" verb for us.
 		informerFactory := k8sinformers.NewSharedInformerFactoryWithOptions(
@@ -827,7 +827,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	// this works because impersonation cannot set UID and thus the final user info the proxy sees has no UID
 	t.Run("nested impersonation as a service account is allowed if it has enough RBAC permissions", func(t *testing.T) {
 		parallelIfNotEKS(t)
-		namespaceName := createTestNamespace(t, adminClient)
+		namespaceName := testlib.CreateNamespace(ctx, t, "impersonation").Name
 		saName, saToken, saUID := createServiceAccountToken(ctx, t, adminClient, namespaceName)
 		nestedImpersonationClient := newImpersonationProxyClientWithCredentials(t,
 			&loginv1alpha1.ClusterCredential{Token: saToken}, impersonationProxyURL, impersonationProxyCACertPEM,
@@ -916,7 +916,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		}
 
 		// Test using a service account token.
-		namespaceName := createTestNamespace(t, adminClient)
+		namespaceName := testlib.CreateNamespace(ctx, t, "impersonation").Name
 		saName, saToken, _ := createServiceAccountToken(ctx, t, adminClient, namespaceName)
 		impersonationProxyServiceAccountPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(t,
 			&loginv1alpha1.ClusterCredential{Token: saToken},
@@ -935,7 +935,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	})
 
 	t.Run("WhoAmIRequests and SA token request", func(t *testing.T) {
-		namespaceName := createTestNamespace(t, adminClient)
+		namespaceName := testlib.CreateNamespace(ctx, t, "impersonation").Name
 		kubeClient := adminClient.CoreV1()
 		saName, _, saUID := createServiceAccountToken(ctx, t, adminClient, namespaceName)
 
@@ -1145,7 +1145,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 
 	t.Run("websocket client", func(t *testing.T) {
 		parallelIfNotEKS(t)
-		namespaceName := createTestNamespace(t, adminClient)
+		namespaceName := testlib.CreateNamespace(ctx, t, "impersonation").Name
 
 		impersonationRestConfig := impersonationProxyRestConfig(
 			refreshCredential(t, impersonationProxyURL, impersonationProxyCACertPEM),
@@ -1224,7 +1224,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 
 	t.Run("http2 client", func(t *testing.T) {
 		parallelIfNotEKS(t)
-		namespaceName := createTestNamespace(t, adminClient)
+		namespaceName := testlib.CreateNamespace(ctx, t, "impersonation").Name
 
 		wantConfigMapLabelKey, wantConfigMapLabelValue := "some-label-key", "some-label-value"
 		wantConfigMap := &corev1.ConfigMap{
@@ -1783,7 +1783,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
 		_, err := adminClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
 		requireEventually.Truef(k8serrors.IsNotFound(err), "expected NotFound error, got %v", err)
-	}, 10*time.Second, 250*time.Millisecond)
+	}, time.Minute, time.Second)
 
 	// Check that the generated CA cert Secret was not deleted by the controller because it's supposed to keep this
 	// around in case we decide to later re-enable the impersonator. We want to avoid generating new CA certs when
@@ -1864,27 +1864,6 @@ func ensureDNSResolves(t *testing.T, urlString string) {
 	}, 5*time.Minute, 1*time.Second)
 }
 
-func createTestNamespace(t *testing.T, adminClient kubernetes.Interface) string {
-	t.Helper()
-
-	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
-	defer cancel()
-
-	namespace, err := adminClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
-		ObjectMeta: metav1.ObjectMeta{GenerateName: "impersonation-integration-test-"},
-	}, metav1.CreateOptions{})
-	require.NoError(t, err)
-
-	t.Cleanup(func() {
-		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-		defer cancel()
-
-		t.Logf("cleaning up test namespace %s", namespace.Name)
-		require.NoError(t, adminClient.CoreV1().Namespaces().Delete(ctx, namespace.Name, metav1.DeleteOptions{}))
-	})
-	return namespace.Name
-}
-
 func createServiceAccountToken(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, namespaceName string) (name, token string, uid types.UID) {
 	t.Helper()
 
test/integration/leaderelection_test.go (new file, 279 lines)
@@ -0,0 +1,279 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package integration

import (
	"context"
	"encoding/json"
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	appsv1 "k8s.io/api/apps/v1"
	authenticationv1 "k8s.io/api/authentication/v1"
	coordinationv1 "k8s.io/api/coordination/v1"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/util/retry"
	"k8s.io/utils/pointer"

	"go.pinniped.dev/internal/downward"
	"go.pinniped.dev/internal/kubeclient"
	"go.pinniped.dev/internal/leaderelection"
	"go.pinniped.dev/test/testlib"
)

func TestLeaderElection(t *testing.T) {
	_ = testlib.IntegrationEnv(t)

	t.Parallel()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	t.Cleanup(cancel)

	leaseName := "leader-election-" + rand.String(5)

	namespace := testlib.CreateNamespace(ctx, t, leaseName)

	clients := leaderElectionClients(t, namespace, leaseName)

	// the tests below are order dependent to some degree and definitely cannot be run in parallel

	t.Run("sanity check write prevention", func(t *testing.T) {
		lease := checkOnlyLeaderCanWrite(ctx, t, namespace, leaseName, clients)
		logLease(t, lease)
	})

	t.Run("clients handle leader election transition correctly", func(t *testing.T) {
		lease := forceTransition(ctx, t, namespace, leaseName, clients)
		logLease(t, lease)
	})

	t.Run("sanity check write prevention after transition", func(t *testing.T) {
		lease := checkOnlyLeaderCanWrite(ctx, t, namespace, leaseName, clients)
		logLease(t, lease)
	})

	t.Run("clients handle leader election restart correctly", func(t *testing.T) {
		lease := forceRestart(ctx, t, namespace, leaseName, clients)
		logLease(t, lease)
	})

	t.Run("sanity check write prevention after restart", func(t *testing.T) {
		lease := checkOnlyLeaderCanWrite(ctx, t, namespace, leaseName, clients)
		logLease(t, lease)
	})
}

func leaderElectionClient(t *testing.T, namespace *corev1.Namespace, leaseName, identity string) *kubeclient.Client {
	t.Helper()

	podInfo := &downward.PodInfo{
		Namespace: namespace.Name,
		Name:      identity,
	}
	deployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: leaseName}}

	client, leaderElector, err := leaderelection.New(podInfo, deployment, testlib.NewKubeclientOptions(t, testlib.NewClientConfig(t))...)
	require.NoError(t, err)

	controllerCtx, controllerCancel := context.WithCancel(context.Background())
	leaderCtx, leaderCancel := context.WithCancel(context.Background())

	t.Cleanup(func() {
		controllerCancel()

		select {
		case <-leaderCtx.Done():
			// leader election client stopped correctly

		case <-time.After(time.Minute):
			t.Errorf("leader election client in namespace %q with lease %q and identity %q failed to stop",
				namespace.Name, leaseName, identity)
		}
	})

	go func() {
		time.Sleep(time.Duration(rand.Int63nRange(1, 10)) * time.Second) // randomize start of client and controllers

		// this blocks
		leaderElector(controllerCtx, func(ctx context.Context) {
			<-ctx.Done()
			time.Sleep(time.Duration(rand.Int63nRange(1, 10)) * time.Second) // randomize stop of controllers
		})

		select {
		case <-controllerCtx.Done():
			// leaderElector correctly stopped but only after controllers stopped

		default:
			t.Errorf("leader election client in namespace %q with lease %q and identity %q stopped early",
				namespace.Name, leaseName, identity)
		}

		leaderCancel()
	}()

	return client
}

func leaderElectionClients(t *testing.T, namespace *corev1.Namespace, leaseName string) map[string]*kubeclient.Client {
	t.Helper()

	count := rand.IntnRange(1, 6)
	out := make(map[string]*kubeclient.Client, count)

	for i := 0; i < count; i++ {
		identity := "leader-election-client-" + rand.String(5)
		out[identity] = leaderElectionClient(t, namespace, leaseName, identity)
	}

	t.Logf("running leader election client tests with %d clients: %v", len(out), sets.StringKeySet(out).List())

	return out
}

func pickRandomLeaderElectionClient(clients map[string]*kubeclient.Client) *kubeclient.Client {
	for _, client := range clients {
		client := client
		return client
	}
	panic("clients map was empty")
}

func waitForIdentity(ctx context.Context, t *testing.T, namespace *corev1.Namespace, leaseName string, clients map[string]*kubeclient.Client) *coordinationv1.Lease {
	t.Helper()

	identities := sets.StringKeySet(clients)
	var out *coordinationv1.Lease

	testlib.RequireEventuallyWithoutError(t, func() (bool, error) {
		lease, err := pickRandomLeaderElectionClient(clients).Kubernetes.CoordinationV1().Leases(namespace.Name).Get(ctx, leaseName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}
		out = lease
		return lease.Spec.HolderIdentity != nil && identities.Has(*lease.Spec.HolderIdentity), nil
	}, 3*time.Minute, time.Second)

	return out
}

func runWriteRequest(ctx context.Context, client *kubeclient.Client) error {
	_, err := client.Kubernetes.AuthenticationV1().TokenReviews().Create(ctx, &authenticationv1.TokenReview{
		Spec: authenticationv1.TokenReviewSpec{Token: "any-non-empty-value"},
	}, metav1.CreateOptions{})
	return err
}

func runWriteRequests(ctx context.Context, clients map[string]*kubeclient.Client) map[string]error {
	out := make(map[string]error, len(clients))

	for identity, client := range clients {
		identity, client := identity, client

		out[identity] = runWriteRequest(ctx, client)
	}

	return out
}

func pickCurrentLeaderClient(ctx context.Context, t *testing.T, namespace *corev1.Namespace, leaseName string, clients map[string]*kubeclient.Client) *kubeclient.Client {
	t.Helper()

	lease := waitForIdentity(ctx, t, namespace, leaseName, clients)
	return clients[*lease.Spec.HolderIdentity]
}

func checkOnlyLeaderCanWrite(ctx context.Context, t *testing.T, namespace *corev1.Namespace, leaseName string, clients map[string]*kubeclient.Client) *coordinationv1.Lease {
	t.Helper()

	lease := waitForIdentity(ctx, t, namespace, leaseName, clients)

	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
		var leaders, nonLeaders int
		for identity, err := range runWriteRequests(ctx, clients) {
			identity, err := identity, err

			if identity == *lease.Spec.HolderIdentity {
				leaders++
				requireEventually.NoError(err, "leader client %q should have no error", identity)
			} else {
				nonLeaders++
				requireEventually.Error(err, "non leader client %q should have write error but it was nil", identity)
				requireEventually.True(errors.Is(err, leaderelection.ErrNotLeader), "non leader client %q should have write error: %v", identity, err)
			}
		}
		requireEventually.Equal(1, leaders, "did not see leader")
		requireEventually.Equal(len(clients)-1, nonLeaders, "did not see non-leader")
	}, time.Minute, time.Second)

	return lease
}

func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namespace, leaseName string, clients map[string]*kubeclient.Client) *coordinationv1.Lease {
	t.Helper()

	var startTransitions int32
	var startTime metav1.MicroTime

	errRetry := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		startLease := waitForIdentity(ctx, t, namespace, leaseName, clients)
		startTransitions = *startLease.Spec.LeaseTransitions
		startTime = *startLease.Spec.AcquireTime

		startLease = startLease.DeepCopy()
		startLease.Spec.HolderIdentity = pointer.String("some-other-client" + rand.String(5))

		_, err := pickCurrentLeaderClient(ctx, t, namespace, leaseName, clients).
			Kubernetes.CoordinationV1().Leases(namespace.Name).Update(ctx, startLease, metav1.UpdateOptions{})
		return err
	})
	require.NoError(t, errRetry)

	finalLease := waitForIdentity(ctx, t, namespace, leaseName, clients)
	finalTransitions := *finalLease.Spec.LeaseTransitions
	finalTime := *finalLease.Spec.AcquireTime

	require.Greater(t, finalTransitions, startTransitions)
	require.Greater(t, finalTime.UnixNano(), startTime.UnixNano())

	time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based

	return finalLease
}

func forceRestart(ctx context.Context, t *testing.T, namespace *corev1.Namespace, leaseName string, clients map[string]*kubeclient.Client) *coordinationv1.Lease {
	t.Helper()

	startLease := waitForIdentity(ctx, t, namespace, leaseName, clients)

	err := pickCurrentLeaderClient(ctx, t, namespace, leaseName, clients).
		Kubernetes.CoordinationV1().Leases(namespace.Name).Delete(ctx, leaseName, metav1.DeleteOptions{})
	require.NoError(t, err)

	newLease := waitForIdentity(ctx, t, namespace, leaseName, clients)
	require.Zero(t, *newLease.Spec.LeaseTransitions)
	require.Greater(t, newLease.Spec.AcquireTime.UnixNano(), startLease.Spec.AcquireTime.UnixNano())

	time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based

	return newLease
}

func logLease(t *testing.T, lease *coordinationv1.Lease) {
	t.Helper()

	bytes, err := json.MarshalIndent(lease, "", "\t")
	require.NoError(t, err)

	t.Logf("current lease:\n%s", string(bytes))
}
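The wiring this test exercises, sketched below from the calls above. This is a rough sketch rather than the production code path: restConfig, ctx, and runControllers are placeholder names, and the behavior of leaderelection.New is inferred only from how the test uses it.

	// Sketch: wrap a client in leader election so only the current leader can write.
	podInfo := &downward.PodInfo{Namespace: "my-namespace", Name: "my-pod"} // placeholder identity
	deployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "my-lease"}}

	client, leaderElector, err := leaderelection.New(podInfo, deployment, kubeclient.WithConfig(restConfig))
	if err != nil {
		return err
	}

	// leaderElector blocks until ctx is canceled; the callback hosts the controllers,
	// which are expected to exit once their ctx is canceled (as the test's fake controllers do).
	leaderElector(ctx, func(ctx context.Context) {
		runControllers(ctx, client) // placeholder for the real controller entry point
	})

	// While this process does not hold the lease, writes made through client.Kubernetes
	// fail with an error satisfying errors.Is(err, leaderelection.ErrNotLeader),
	// which is exactly what checkOnlyLeaderCanWrite asserts above.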
@ -137,13 +137,19 @@ func newAnonymousClientRestConfigWithCertAndKeyAdded(t *testing.T, clientCertifi
	return config
}

+func NewKubeclientOptions(t *testing.T, config *rest.Config) []kubeclient.Option {
+	t.Helper()
+
+	return []kubeclient.Option{
+		kubeclient.WithConfig(config),
+		kubeclient.WithMiddleware(groupsuffix.New(IntegrationEnv(t).APIGroupSuffix)),
+	}
+}
+
func NewKubeclient(t *testing.T, config *rest.Config) *kubeclient.Client {
	t.Helper()
-	env := IntegrationEnv(t)
-	client, err := kubeclient.New(
-		kubeclient.WithConfig(config),
-		kubeclient.WithMiddleware(groupsuffix.New(env.APIGroupSuffix)),
-	)
+	client, err := kubeclient.New(NewKubeclientOptions(t, config)...)
	require.NoError(t, err)
	return client
}
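A rough usage sketch for these helpers, with illustrative names only (the ctx variable and the namespace being fetched are placeholders, not part of this change):

	cfg := testlib.NewClientConfig(t)       // admin *rest.Config for the test cluster
	client := testlib.NewKubeclient(t, cfg) // kubeclient.Client with the group-suffix middleware applied
	_, err := client.Kubernetes.CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{})
	require.NoError(t, err)

	// The same option slice can feed other constructors, which is how the new
	// leader election test builds its clients:
	//   leaderelection.New(podInfo, deployment, testlib.NewKubeclientOptions(t, cfg)...)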
@ -502,6 +508,30 @@ func CreatePod(ctx context.Context, t *testing.T, name, namespace string, spec c
	return result
}

+func CreateNamespace(ctx context.Context, t *testing.T, name string) *corev1.Namespace {
+	t.Helper()
+
+	adminClient := NewKubernetesClientset(t)
+
+	ctx, cancel := context.WithTimeout(ctx, time.Minute)
+	defer cancel()
+
+	namespace, err := adminClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{GenerateName: name + "-integration-test-"},
+	}, metav1.CreateOptions{})
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+		defer cancel()
+
+		t.Logf("cleaning up test namespace %s", namespace.Name)
+		require.NoError(t, adminClient.CoreV1().Namespaces().Delete(ctx, namespace.Name, metav1.DeleteOptions{}))
+	})
+
+	return namespace
+}
+
func WaitForUserToHaveAccess(t *testing.T, user string, groups []string, shouldHaveAccessTo *authorizationv1.ResourceAttributes) {
	t.Helper()
	client := NewKubernetesClientset(t)