Merge branch 'main' into label_every_resource
This commit is contained in:
commit
f8e461dfc3
@ -3,15 +3,18 @@
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Enum=Success;Duplicate;Invalid
|
||||
type OIDCProviderStatus string
|
||||
|
||||
const (
|
||||
SuccessOIDCProviderStatus = OIDCProviderStatus("Success")
|
||||
SuccessOIDCProviderStatus = OIDCProviderStatus("Success")
|
||||
DuplicateOIDCProviderStatus = OIDCProviderStatus("Duplicate")
|
||||
InvalidOIDCProviderStatus = OIDCProviderStatus("Invalid")
|
||||
InvalidOIDCProviderStatus = OIDCProviderStatus("Invalid")
|
||||
)
|
||||
|
||||
// OIDCProviderConfigSpec is a struct that describes an OIDC Provider.
|
||||
@ -39,11 +42,17 @@ type OIDCProviderConfigStatus struct {
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
|
||||
// around some undesirable behavior with respect to the empty metav1.Time value (see
|
||||
// https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
// +optional
|
||||
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
|
||||
// around some undesirable behavior with respect to the empty metav1.Time value (see
|
||||
// https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
// +optional
|
||||
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
|
||||
// JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys
|
||||
// are stored. If it is empty, then the signing/verification keys are either unknown or they don't
|
||||
// exist.
|
||||
// +optional
|
||||
JWKSSecret corev1.LocalObjectReference `json:"jwksSecret,omitempty"`
|
||||
}
|
||||
|
||||
// OIDCProviderConfig describes the configuration of an OIDC provider.
|
||||
|
@ -13,6 +13,8 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/pkg/version"
|
||||
"k8s.io/client-go/rest"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
@ -62,7 +64,9 @@ func waitForSignal() os.Signal {
|
||||
func startControllers(
|
||||
ctx context.Context,
|
||||
issuerProvider *manager.Manager,
|
||||
kubeClient kubernetes.Interface,
|
||||
pinnipedClient pinnipedclientset.Interface,
|
||||
kubeInformers kubeinformers.SharedInformerFactory,
|
||||
pinnipedInformers pinnipedinformers.SharedInformerFactory,
|
||||
) {
|
||||
// Create controller manager.
|
||||
@ -77,37 +81,60 @@ func startControllers(
|
||||
controllerlib.WithInformer,
|
||||
),
|
||||
singletonWorker,
|
||||
).
|
||||
WithController(
|
||||
supervisorconfig.NewJWKSController(
|
||||
kubeClient,
|
||||
pinnipedClient,
|
||||
kubeInformers.Core().V1().Secrets(),
|
||||
pinnipedInformers.Config().V1alpha1().OIDCProviderConfigs(),
|
||||
controllerlib.WithInformer,
|
||||
),
|
||||
singletonWorker,
|
||||
)
|
||||
|
||||
kubeInformers.Start(ctx.Done())
|
||||
pinnipedInformers.Start(ctx.Done())
|
||||
|
||||
go controllerManager.Start(ctx)
|
||||
}
|
||||
|
||||
func newPinnipedClient() (pinnipedclientset.Interface, error) {
|
||||
func newClients() (kubernetes.Interface, pinnipedclientset.Interface, error) {
|
||||
kubeConfig, err := restclient.InClusterConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
|
||||
return nil, nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
|
||||
}
|
||||
|
||||
// Connect to the core Kubernetes API.
|
||||
pinnipedClient, err := pinnipedclientset.NewForConfig(kubeConfig)
|
||||
kubeClient, err := kubernetes.NewForConfig(kubeConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
|
||||
return nil, nil, fmt.Errorf("could not create kube client: %w", err)
|
||||
}
|
||||
|
||||
return pinnipedClient, nil
|
||||
// Connect to the Pinniped API.
|
||||
pinnipedClient, err := pinnipedclientset.NewForConfig(kubeConfig)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not create pinniped client: %w", err)
|
||||
}
|
||||
|
||||
return kubeClient, pinnipedClient, nil
|
||||
}
|
||||
|
||||
func run(serverInstallationNamespace string) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
pinnipedClient, err := newPinnipedClient()
|
||||
kubeClient, pinnipedClient, err := newClients()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot create k8s client: %w", err)
|
||||
}
|
||||
|
||||
kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
|
||||
kubeClient,
|
||||
defaultResyncInterval,
|
||||
kubeinformers.WithNamespace(serverInstallationNamespace),
|
||||
)
|
||||
|
||||
pinnipedInformers := pinnipedinformers.NewSharedInformerFactoryWithOptions(
|
||||
pinnipedClient,
|
||||
defaultResyncInterval,
|
||||
@ -115,7 +142,7 @@ func run(serverInstallationNamespace string) error {
|
||||
)
|
||||
|
||||
oidProvidersManager := manager.NewManager(http.NotFoundHandler())
|
||||
startControllers(ctx, oidProvidersManager, pinnipedClient, pinnipedInformers)
|
||||
startControllers(ctx, oidProvidersManager, kubeClient, pinnipedClient, kubeInformers, pinnipedInformers)
|
||||
|
||||
//nolint: gosec // Intentionally binding to all network interfaces.
|
||||
l, err := net.Listen("tcp", ":80")
|
||||
|
@ -55,6 +55,17 @@ spec:
|
||||
status:
|
||||
description: Status of the OIDC provider.
|
||||
properties:
|
||||
jwksSecret:
|
||||
description: JWKSSecret holds the name of the secret in which this
|
||||
OIDC Provider's signing/verification keys are stored. If it is empty,
|
||||
then the signing/verification keys are either unknown or they don't
|
||||
exist.
|
||||
properties:
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?'
|
||||
type: string
|
||||
type: object
|
||||
lastUpdateTime:
|
||||
description: LastUpdateTime holds the time at which the Status was
|
||||
last updated. It is a pointer to get around some undesirable behavior
|
||||
|
@ -13,6 +13,9 @@ metadata:
|
||||
namespace: #@ namespace()
|
||||
labels: #@ labels()
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: [secrets]
|
||||
verbs: [create, get, list, patch, update, watch, delete]
|
||||
- apiGroups: [config.pinniped.dev]
|
||||
resources: [oidcproviderconfigs]
|
||||
verbs: [update, get, list, watch]
|
||||
|
1
generated/1.17/README.adoc
generated
1
generated/1.17/README.adoc
generated
@ -151,6 +151,7 @@ OIDCProviderConfigStatus is a struct that describes the actual state of an OIDC
|
||||
| *`status`* __OIDCProviderStatus__ | Status holds an enum that describes the state of this OIDC Provider. Note that this Status can represent success or failure.
|
||||
| *`message`* __string__ | Message provides human-readable details about the Status.
|
||||
| *`lastUpdateTime`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#time-v1-meta[$$Time$$]__ | LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get around some undesirable behavior with respect to the empty metav1.Time value (see https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
| *`jwksSecret`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#localobjectreference-v1-core[$$LocalObjectReference$$]__ | JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys are stored. If it is empty, then the signing/verification keys are either unknown or they don't exist.
|
||||
|===
|
||||
|
||||
|
||||
|
@ -3,15 +3,18 @@
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Enum=Success;Duplicate;Invalid
|
||||
type OIDCProviderStatus string
|
||||
|
||||
const (
|
||||
SuccessOIDCProviderStatus = OIDCProviderStatus("Success")
|
||||
SuccessOIDCProviderStatus = OIDCProviderStatus("Success")
|
||||
DuplicateOIDCProviderStatus = OIDCProviderStatus("Duplicate")
|
||||
InvalidOIDCProviderStatus = OIDCProviderStatus("Invalid")
|
||||
InvalidOIDCProviderStatus = OIDCProviderStatus("Invalid")
|
||||
)
|
||||
|
||||
// OIDCProviderConfigSpec is a struct that describes an OIDC Provider.
|
||||
@ -39,11 +42,17 @@ type OIDCProviderConfigStatus struct {
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
|
||||
// around some undesirable behavior with respect to the empty metav1.Time value (see
|
||||
// https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
// +optional
|
||||
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
|
||||
// around some undesirable behavior with respect to the empty metav1.Time value (see
|
||||
// https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
// +optional
|
||||
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
|
||||
// JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys
|
||||
// are stored. If it is empty, then the signing/verification keys are either unknown or they don't
|
||||
// exist.
|
||||
// +optional
|
||||
JWKSSecret corev1.LocalObjectReference `json:"jwksSecret,omitempty"`
|
||||
}
|
||||
|
||||
// OIDCProviderConfig describes the configuration of an OIDC provider.
|
||||
|
@ -216,6 +216,7 @@ func (in *OIDCProviderConfigStatus) DeepCopyInto(out *OIDCProviderConfigStatus)
|
||||
in, out := &in.LastUpdateTime, &out.LastUpdateTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.JWKSSecret = in.JWKSSecret
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -431,11 +431,17 @@ func schema_117_apis_config_v1alpha1_OIDCProviderConfigStatus(ref common.Referen
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
|
||||
},
|
||||
},
|
||||
"jwksSecret": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys are stored. If it is empty, then the signing/verification keys are either unknown or they don't exist.",
|
||||
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
|
||||
"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -55,6 +55,17 @@ spec:
|
||||
status:
|
||||
description: Status of the OIDC provider.
|
||||
properties:
|
||||
jwksSecret:
|
||||
description: JWKSSecret holds the name of the secret in which this
|
||||
OIDC Provider's signing/verification keys are stored. If it is empty,
|
||||
then the signing/verification keys are either unknown or they don't
|
||||
exist.
|
||||
properties:
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?'
|
||||
type: string
|
||||
type: object
|
||||
lastUpdateTime:
|
||||
description: LastUpdateTime holds the time at which the Status was
|
||||
last updated. It is a pointer to get around some undesirable behavior
|
||||
|
1
generated/1.18/README.adoc
generated
1
generated/1.18/README.adoc
generated
@ -151,6 +151,7 @@ OIDCProviderConfigStatus is a struct that describes the actual state of an OIDC
|
||||
| *`status`* __OIDCProviderStatus__ | Status holds an enum that describes the state of this OIDC Provider. Note that this Status can represent success or failure.
|
||||
| *`message`* __string__ | Message provides human-readable details about the Status.
|
||||
| *`lastUpdateTime`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta[$$Time$$]__ | LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get around some undesirable behavior with respect to the empty metav1.Time value (see https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
| *`jwksSecret`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#localobjectreference-v1-core[$$LocalObjectReference$$]__ | JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys are stored. If it is empty, then the signing/verification keys are either unknown or they don't exist.
|
||||
|===
|
||||
|
||||
|
||||
|
@ -3,15 +3,18 @@
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Enum=Success;Duplicate;Invalid
|
||||
type OIDCProviderStatus string
|
||||
|
||||
const (
|
||||
SuccessOIDCProviderStatus = OIDCProviderStatus("Success")
|
||||
SuccessOIDCProviderStatus = OIDCProviderStatus("Success")
|
||||
DuplicateOIDCProviderStatus = OIDCProviderStatus("Duplicate")
|
||||
InvalidOIDCProviderStatus = OIDCProviderStatus("Invalid")
|
||||
InvalidOIDCProviderStatus = OIDCProviderStatus("Invalid")
|
||||
)
|
||||
|
||||
// OIDCProviderConfigSpec is a struct that describes an OIDC Provider.
|
||||
@ -39,11 +42,17 @@ type OIDCProviderConfigStatus struct {
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
|
||||
// around some undesirable behavior with respect to the empty metav1.Time value (see
|
||||
// https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
// +optional
|
||||
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
|
||||
// around some undesirable behavior with respect to the empty metav1.Time value (see
|
||||
// https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
// +optional
|
||||
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
|
||||
// JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys
|
||||
// are stored. If it is empty, then the signing/verification keys are either unknown or they don't
|
||||
// exist.
|
||||
// +optional
|
||||
JWKSSecret corev1.LocalObjectReference `json:"jwksSecret,omitempty"`
|
||||
}
|
||||
|
||||
// OIDCProviderConfig describes the configuration of an OIDC provider.
|
||||
|
@ -216,6 +216,7 @@ func (in *OIDCProviderConfigStatus) DeepCopyInto(out *OIDCProviderConfigStatus)
|
||||
in, out := &in.LastUpdateTime, &out.LastUpdateTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.JWKSSecret = in.JWKSSecret
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -431,11 +431,17 @@ func schema_118_apis_config_v1alpha1_OIDCProviderConfigStatus(ref common.Referen
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
|
||||
},
|
||||
},
|
||||
"jwksSecret": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys are stored. If it is empty, then the signing/verification keys are either unknown or they don't exist.",
|
||||
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
|
||||
"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -55,6 +55,17 @@ spec:
|
||||
status:
|
||||
description: Status of the OIDC provider.
|
||||
properties:
|
||||
jwksSecret:
|
||||
description: JWKSSecret holds the name of the secret in which this
|
||||
OIDC Provider's signing/verification keys are stored. If it is empty,
|
||||
then the signing/verification keys are either unknown or they don't
|
||||
exist.
|
||||
properties:
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?'
|
||||
type: string
|
||||
type: object
|
||||
lastUpdateTime:
|
||||
description: LastUpdateTime holds the time at which the Status was
|
||||
last updated. It is a pointer to get around some undesirable behavior
|
||||
|
1
generated/1.19/README.adoc
generated
1
generated/1.19/README.adoc
generated
@ -151,6 +151,7 @@ OIDCProviderConfigStatus is a struct that describes the actual state of an OIDC
|
||||
| *`status`* __OIDCProviderStatus__ | Status holds an enum that describes the state of this OIDC Provider. Note that this Status can represent success or failure.
|
||||
| *`message`* __string__ | Message provides human-readable details about the Status.
|
||||
| *`lastUpdateTime`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#time-v1-meta[$$Time$$]__ | LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get around some undesirable behavior with respect to the empty metav1.Time value (see https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
| *`jwksSecret`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#localobjectreference-v1-core[$$LocalObjectReference$$]__ | JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys are stored. If it is empty, then the signing/verification keys are either unknown or they don't exist.
|
||||
|===
|
||||
|
||||
|
||||
|
@ -3,15 +3,18 @@
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Enum=Success;Duplicate;Invalid
|
||||
type OIDCProviderStatus string
|
||||
|
||||
const (
|
||||
SuccessOIDCProviderStatus = OIDCProviderStatus("Success")
|
||||
SuccessOIDCProviderStatus = OIDCProviderStatus("Success")
|
||||
DuplicateOIDCProviderStatus = OIDCProviderStatus("Duplicate")
|
||||
InvalidOIDCProviderStatus = OIDCProviderStatus("Invalid")
|
||||
InvalidOIDCProviderStatus = OIDCProviderStatus("Invalid")
|
||||
)
|
||||
|
||||
// OIDCProviderConfigSpec is a struct that describes an OIDC Provider.
|
||||
@ -39,11 +42,17 @@ type OIDCProviderConfigStatus struct {
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
|
||||
// around some undesirable behavior with respect to the empty metav1.Time value (see
|
||||
// https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
// +optional
|
||||
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
// LastUpdateTime holds the time at which the Status was last updated. It is a pointer to get
|
||||
// around some undesirable behavior with respect to the empty metav1.Time value (see
|
||||
// https://github.com/kubernetes/kubernetes/issues/86811).
|
||||
// +optional
|
||||
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
|
||||
// JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys
|
||||
// are stored. If it is empty, then the signing/verification keys are either unknown or they don't
|
||||
// exist.
|
||||
// +optional
|
||||
JWKSSecret corev1.LocalObjectReference `json:"jwksSecret,omitempty"`
|
||||
}
|
||||
|
||||
// OIDCProviderConfig describes the configuration of an OIDC provider.
|
||||
|
@ -216,6 +216,7 @@ func (in *OIDCProviderConfigStatus) DeepCopyInto(out *OIDCProviderConfigStatus)
|
||||
in, out := &in.LastUpdateTime, &out.LastUpdateTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
out.JWKSSecret = in.JWKSSecret
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -432,11 +432,17 @@ func schema_119_apis_config_v1alpha1_OIDCProviderConfigStatus(ref common.Referen
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
|
||||
},
|
||||
},
|
||||
"jwksSecret": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "JWKSSecret holds the name of the secret in which this OIDC Provider's signing/verification keys are stored. If it is empty, then the signing/verification keys are either unknown or they don't exist.",
|
||||
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
|
||||
"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -55,6 +55,17 @@ spec:
|
||||
status:
|
||||
description: Status of the OIDC provider.
|
||||
properties:
|
||||
jwksSecret:
|
||||
description: JWKSSecret holds the name of the secret in which this
|
||||
OIDC Provider's signing/verification keys are stored. If it is empty,
|
||||
then the signing/verification keys are either unknown or they don't
|
||||
exist.
|
||||
properties:
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?'
|
||||
type: string
|
||||
type: object
|
||||
lastUpdateTime:
|
||||
description: LastUpdateTime holds the time at which the Status was
|
||||
last updated. It is a pointer to get around some undesirable behavior
|
||||
|
@ -20,7 +20,7 @@ function tilt_mode() {
|
||||
function log_note() {
|
||||
GREEN='\033[0;32m'
|
||||
NC='\033[0m'
|
||||
if [[ $COLORTERM =~ ^(truecolor|24bit)$ ]]; then
|
||||
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
|
||||
echo -e "${GREEN}$*${NC}"
|
||||
else
|
||||
echo "$*"
|
||||
@ -30,7 +30,7 @@ function log_note() {
|
||||
function log_error() {
|
||||
RED='\033[0;31m'
|
||||
NC='\033[0m'
|
||||
if [[ $COLORTERM =~ ^(truecolor|24bit)$ ]]; then
|
||||
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
|
||||
echo -e "🙁${RED} Error: $* ${NC}"
|
||||
else
|
||||
echo ":( Error: $*"
|
||||
|
375
internal/controller/supervisorconfig/jwks.go
Normal file
375
internal/controller/supervisorconfig/jwks.go
Normal file
@ -0,0 +1,375 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package supervisorconfig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"gopkg.in/square/go-jose.v2"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
|
||||
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
|
||||
configinformers "go.pinniped.dev/generated/1.19/client/informers/externalversions/config/v1alpha1"
|
||||
pinnipedcontroller "go.pinniped.dev/internal/controller"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
)
|
||||
|
||||
// These constants are the keys in an OPC's Secret's Data map.
|
||||
const (
|
||||
// activeJWKKey points to the current private key used for signing tokens.
|
||||
//
|
||||
// Note! The value for this key will contain private key material!
|
||||
activeJWKKey = "activeJWK"
|
||||
// jwksKey points to the current JWKS used to verify tokens.
|
||||
//
|
||||
// Note! The value for this key will contain only public key material!
|
||||
jwksKey = "jwks"
|
||||
)
|
||||
|
||||
const (
|
||||
opcKind = "OIDCProviderConfig"
|
||||
)
|
||||
|
||||
// generateKey is stubbed out for the purpose of testing. The default behavior is to generate an EC key.
|
||||
//nolint:gochecknoglobals
|
||||
var generateKey func(r io.Reader) (interface{}, error) = generateECKey
|
||||
|
||||
func generateECKey(r io.Reader) (interface{}, error) {
|
||||
return ecdsa.GenerateKey(elliptic.P256(), r)
|
||||
}
|
||||
|
||||
// jwkController holds the fields necessary for the JWKS controller to communicate with OPC's and
|
||||
// secrets, both via a cache and via the API.
|
||||
type jwksController struct {
|
||||
pinnipedClient pinnipedclientset.Interface
|
||||
kubeClient kubernetes.Interface
|
||||
opcInformer configinformers.OIDCProviderConfigInformer
|
||||
secretInformer corev1informers.SecretInformer
|
||||
}
|
||||
|
||||
// NewJWKSController returns a controllerlib.Controller that ensures an OPC has a corresponding
|
||||
// Secret that contains a valid active JWK and JWKS.
|
||||
func NewJWKSController(
|
||||
kubeClient kubernetes.Interface,
|
||||
pinnipedClient pinnipedclientset.Interface,
|
||||
secretInformer corev1informers.SecretInformer,
|
||||
opcInformer configinformers.OIDCProviderConfigInformer,
|
||||
withInformer pinnipedcontroller.WithInformerOptionFunc,
|
||||
) controllerlib.Controller {
|
||||
return controllerlib.New(
|
||||
controllerlib.Config{
|
||||
Name: "JWKSController",
|
||||
Syncer: &jwksController{
|
||||
kubeClient: kubeClient,
|
||||
pinnipedClient: pinnipedClient,
|
||||
secretInformer: secretInformer,
|
||||
opcInformer: opcInformer,
|
||||
},
|
||||
},
|
||||
// We want to be notified when a OPC's secret gets updated or deleted. When this happens, we
|
||||
// should get notified via the corresponding OPC key.
|
||||
withInformer(
|
||||
secretInformer,
|
||||
controllerlib.FilterFuncs{
|
||||
ParentFunc: func(obj metav1.Object) controllerlib.Key {
|
||||
if isOPCControllee(obj) {
|
||||
controller := metav1.GetControllerOf(obj)
|
||||
return controllerlib.Key{
|
||||
Name: controller.Name,
|
||||
Namespace: obj.GetNamespace(),
|
||||
}
|
||||
}
|
||||
return controllerlib.Key{}
|
||||
},
|
||||
AddFunc: isOPCControllee,
|
||||
UpdateFunc: func(oldObj, newObj metav1.Object) bool {
|
||||
return isOPCControllee(oldObj) || isOPCControllee(newObj)
|
||||
},
|
||||
DeleteFunc: isOPCControllee,
|
||||
},
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
// We want to be notified when anything happens to an OPC.
|
||||
withInformer(
|
||||
opcInformer,
|
||||
pinnipedcontroller.NoOpFilter(),
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// Sync implements controllerlib.Syncer.
|
||||
func (c *jwksController) Sync(ctx controllerlib.Context) error {
|
||||
opc, err := c.opcInformer.Lister().OIDCProviderConfigs(ctx.Key.Namespace).Get(ctx.Key.Name)
|
||||
notFound := k8serrors.IsNotFound(err)
|
||||
if err != nil && !notFound {
|
||||
return fmt.Errorf(
|
||||
"failed to get %s/%s OIDCProviderConfig: %w",
|
||||
ctx.Key.Namespace,
|
||||
ctx.Key.Name,
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
if notFound {
|
||||
// The corresponding secret to this OPC should have been garbage collected since it should have
|
||||
// had this OPC as its owner.
|
||||
klog.InfoS(
|
||||
"oidcproviderconfig deleted",
|
||||
"oidcproviderconfig",
|
||||
klog.KRef(ctx.Key.Namespace, ctx.Key.Name),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
secretNeedsUpdate, err := c.secretNeedsUpdate(opc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot determine secret status: %w", err)
|
||||
}
|
||||
if !secretNeedsUpdate {
|
||||
// Secret is up to date - we are good to go.
|
||||
klog.InfoS(
|
||||
"secret is up to date",
|
||||
"oidcproviderconfig",
|
||||
klog.KRef(ctx.Key.Namespace, ctx.Key.Name),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the OPC does not have a secret associated with it, that secret does not exist, or the secret
|
||||
// is invalid, we will generate a new secret (i.e., a JWKS).
|
||||
secret, err := c.generateSecret(opc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot generate secret: %w", err)
|
||||
}
|
||||
|
||||
if err := c.createOrUpdateSecret(ctx.Context, secret); err != nil {
|
||||
return fmt.Errorf("cannot create or update secret: %w", err)
|
||||
}
|
||||
klog.InfoS("created/updated secret", "secret", klog.KObj(secret))
|
||||
|
||||
// Ensure that the OPC points to the secret.
|
||||
newOPC := opc.DeepCopy()
|
||||
newOPC.Status.JWKSSecret.Name = secret.Name
|
||||
if err := c.updateOPC(ctx.Context, newOPC); err != nil {
|
||||
return fmt.Errorf("cannot update opc: %w", err)
|
||||
}
|
||||
klog.InfoS("updated oidcproviderconfig", "oidcproviderconfig", klog.KObj(newOPC))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *jwksController) secretNeedsUpdate(opc *configv1alpha1.OIDCProviderConfig) (bool, error) {
|
||||
if opc.Status.JWKSSecret.Name == "" {
|
||||
// If the OPC says it doesn't have a secret associated with it, then let's create one.
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// This OPC says it has a secret associated with it. Let's try to get it from the cache.
|
||||
secret, err := c.secretInformer.Lister().Secrets(opc.Namespace).Get(opc.Status.JWKSSecret.Name)
|
||||
notFound := k8serrors.IsNotFound(err)
|
||||
if err != nil && !notFound {
|
||||
return false, fmt.Errorf("cannot get secret: %w", err)
|
||||
}
|
||||
if notFound {
|
||||
// If we can't find the secret, let's assume we need to create it.
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if !isValid(secret) {
|
||||
// If this secret is invalid, we need to generate a new one.
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *jwksController) generateSecret(opc *configv1alpha1.OIDCProviderConfig) (*corev1.Secret, error) {
|
||||
// Note! This is where we could potentially add more handling of OPC spec fields which tell us how
|
||||
// this OIDC provider should sign and verify ID tokens (e.g., hardcoded token secret, gRPC
|
||||
// connection to KMS, etc).
|
||||
//
|
||||
// For now, we just generate an new RSA keypair and put that in the secret.
|
||||
|
||||
key, err := generateKey(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot generate key: %w", err)
|
||||
}
|
||||
|
||||
jwk := jose.JSONWebKey{
|
||||
Key: key,
|
||||
KeyID: "pinniped-supervisor-key",
|
||||
Algorithm: "ES256",
|
||||
Use: "sig",
|
||||
}
|
||||
jwkData, err := json.Marshal(jwk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot marshal jwk: %w", err)
|
||||
}
|
||||
|
||||
jwks := jose.JSONWebKeySet{
|
||||
Keys: []jose.JSONWebKey{jwk.Public()},
|
||||
}
|
||||
jwksData, err := json.Marshal(jwks)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot marshal jwks: %w", err)
|
||||
}
|
||||
|
||||
s := corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: opc.Name + "-jwks",
|
||||
Namespace: opc.Namespace,
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
*metav1.NewControllerRef(opc, schema.GroupVersionKind{
|
||||
Group: configv1alpha1.SchemeGroupVersion.Group,
|
||||
Version: configv1alpha1.SchemeGroupVersion.Version,
|
||||
Kind: opcKind,
|
||||
}),
|
||||
},
|
||||
// TODO: custom labels.
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
activeJWKKey: jwkData,
|
||||
jwksKey: jwksData,
|
||||
},
|
||||
}
|
||||
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
func (c *jwksController) createOrUpdateSecret(
|
||||
ctx context.Context,
|
||||
newSecret *corev1.Secret,
|
||||
) error {
|
||||
secretClient := c.kubeClient.CoreV1().Secrets(newSecret.Namespace)
|
||||
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
oldSecret, err := secretClient.Get(ctx, newSecret.Name, metav1.GetOptions{})
|
||||
notFound := k8serrors.IsNotFound(err)
|
||||
if err != nil && !notFound {
|
||||
return fmt.Errorf("cannot get secret: %w", err)
|
||||
}
|
||||
|
||||
if notFound {
|
||||
// New secret doesn't exist, so create it.
|
||||
_, err := secretClient.Create(ctx, newSecret, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot create secret: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// New secret already exists, so ensure it is up to date.
|
||||
|
||||
if isValid(oldSecret) {
|
||||
// If the secret already has valid JWK's, then we are good to go and we don't need an update.
|
||||
return nil
|
||||
}
|
||||
|
||||
oldSecret.Data = newSecret.Data
|
||||
_, err = secretClient.Update(ctx, oldSecret, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (c *jwksController) updateOPC(
|
||||
ctx context.Context,
|
||||
newOPC *configv1alpha1.OIDCProviderConfig,
|
||||
) error {
|
||||
opcClient := c.pinnipedClient.ConfigV1alpha1().OIDCProviderConfigs(newOPC.Namespace)
|
||||
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
oldOPC, err := opcClient.Get(ctx, newOPC.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot get opc: %w", err)
|
||||
}
|
||||
|
||||
if newOPC.Status.JWKSSecret.Name == oldOPC.Status.JWKSSecret.Name {
|
||||
// If the existing OPC is up to date, we don't need to update it.
|
||||
return nil
|
||||
}
|
||||
|
||||
oldOPC.Status.JWKSSecret.Name = newOPC.Status.JWKSSecret.Name
|
||||
_, err = opcClient.Update(ctx, oldOPC, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// isOPCControllee returns whether the provided obj is controlled by an OPC, i.e., whether it
// has a controller OwnerReference whose APIVersion and Kind match the OIDCProviderConfig type.
func isOPCControllee(obj metav1.Object) bool {
	controller := metav1.GetControllerOf(obj)
	return controller != nil &&
		controller.APIVersion == configv1alpha1.SchemeGroupVersion.String() &&
		controller.Kind == opcKind
}
|
||||
|
||||
// isValid returns whether the provided secret contains a valid active JWK and verification JWKS.
|
||||
func isValid(secret *corev1.Secret) bool {
|
||||
jwkData, ok := secret.Data[activeJWKKey]
|
||||
if !ok {
|
||||
klog.InfoS("secret does not contain active jwk")
|
||||
return false
|
||||
}
|
||||
|
||||
var activeJWK jose.JSONWebKey
|
||||
if err := json.Unmarshal(jwkData, &activeJWK); err != nil {
|
||||
klog.InfoS("cannot unmarshal active jwk", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
if activeJWK.IsPublic() {
|
||||
klog.InfoS("active jwk is public", "keyid", activeJWK.KeyID)
|
||||
return false
|
||||
}
|
||||
|
||||
if !activeJWK.Valid() {
|
||||
klog.InfoS("active jwk is not valid", "keyid", activeJWK.KeyID)
|
||||
return false
|
||||
}
|
||||
|
||||
jwksData, ok := secret.Data[jwksKey]
|
||||
if !ok {
|
||||
klog.InfoS("secret does not contain valid jwks")
|
||||
}
|
||||
|
||||
var validJWKS jose.JSONWebKeySet
|
||||
if err := json.Unmarshal(jwksData, &validJWKS); err != nil {
|
||||
klog.InfoS("cannot unmarshal valid jwks", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
foundActiveJWK := false
|
||||
for _, validJWK := range validJWKS.Keys {
|
||||
if !validJWK.IsPublic() {
|
||||
klog.InfoS("jwks key is not public", "keyid", validJWK.KeyID)
|
||||
return false
|
||||
}
|
||||
if !validJWK.Valid() {
|
||||
klog.InfoS("jwks key is not valid", "keyid", validJWK.KeyID)
|
||||
return false
|
||||
}
|
||||
if validJWK.KeyID == activeJWK.KeyID {
|
||||
foundActiveJWK = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundActiveJWK {
|
||||
klog.InfoS("did not find active jwk in valid jwks", "keyid", activeJWK.KeyID)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
699
internal/controller/supervisorconfig/jwks_test.go
Normal file
699
internal/controller/supervisorconfig/jwks_test.go
Normal file
@ -0,0 +1,699 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package supervisorconfig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
kubernetesfake "k8s.io/client-go/kubernetes/fake"
|
||||
kubetesting "k8s.io/client-go/testing"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
|
||||
pinnipedfake "go.pinniped.dev/generated/1.19/client/clientset/versioned/fake"
|
||||
pinnipedinformers "go.pinniped.dev/generated/1.19/client/informers/externalversions"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
// TestJWKSControllerFilterSecret verifies which Secret events the JWKS controller's informer
// filter accepts: only secrets with a controller OwnerReference (Controller=true) whose
// APIVersion and Kind match OIDCProviderConfig should trigger add/update/delete syncs, and
// the parent key must name that owning OPC.
func TestJWKSControllerFilterSecret(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name       string
		secret     corev1.Secret
		wantAdd    bool
		wantUpdate bool
		wantDelete bool
		wantParent controllerlib.Key
	}{
		{
			name: "no owner reference",
			secret: corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{},
			},
		},
		{
			name: "owner reference without correct APIVersion",
			secret: corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "some-namespace",
					OwnerReferences: []metav1.OwnerReference{
						{
							Kind:       "OIDCProviderConfig",
							Name:       "some-name",
							Controller: boolPtr(true),
						},
					},
				},
			},
		},
		{
			name: "owner reference without correct Kind",
			secret: corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "some-namespace",
					OwnerReferences: []metav1.OwnerReference{
						{
							APIVersion: configv1alpha1.SchemeGroupVersion.String(),
							Name:       "some-name",
							Controller: boolPtr(true),
						},
					},
				},
			},
		},
		{
			name: "owner reference without controller set to true",
			secret: corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "some-namespace",
					OwnerReferences: []metav1.OwnerReference{
						{
							APIVersion: configv1alpha1.SchemeGroupVersion.String(),
							Kind:       "OIDCProviderConfig",
							Name:       "some-name",
						},
					},
				},
			},
		},
		{
			name: "correct owner reference",
			secret: corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "some-namespace",
					OwnerReferences: []metav1.OwnerReference{
						{
							APIVersion: configv1alpha1.SchemeGroupVersion.String(),
							Kind:       "OIDCProviderConfig",
							Name:       "some-name",
							Controller: boolPtr(true),
						},
					},
				},
			},
			wantAdd:    true,
			wantUpdate: true,
			wantDelete: true,
			wantParent: controllerlib.Key{Namespace: "some-namespace", Name: "some-name"},
		},
		{
			// Only the controller OwnerReference matters; unrelated owners are ignored.
			name: "multiple owner references",
			secret: corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "some-namespace",
					OwnerReferences: []metav1.OwnerReference{
						{
							Kind: "UnrelatedKind",
						},
						{
							APIVersion: configv1alpha1.SchemeGroupVersion.String(),
							Kind:       "OIDCProviderConfig",
							Name:       "some-name",
							Controller: boolPtr(true),
						},
					},
				},
			},
			wantAdd:    true,
			wantUpdate: true,
			wantDelete: true,
			wantParent: controllerlib.Key{Namespace: "some-namespace", Name: "some-name"},
		},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			secretInformer := kubeinformers.NewSharedInformerFactory(
				kubernetesfake.NewSimpleClientset(),
				0,
			).Core().V1().Secrets()
			opcInformer := pinnipedinformers.NewSharedInformerFactory(
				pinnipedfake.NewSimpleClientset(),
				0,
			).Config().V1alpha1().OIDCProviderConfigs()
			withInformer := testutil.NewObservableWithInformerOption()
			_ = NewJWKSController(
				nil, // kubeClient, not needed
				nil, // pinnipedClient, not needed
				secretInformer,
				opcInformer,
				withInformer.WithInformer,
			)

			unrelated := corev1.Secret{}
			filter := withInformer.GetFilterForInformer(secretInformer)
			require.Equal(t, test.wantAdd, filter.Add(&test.secret))
			// An update should match whether the secret is the old or the new object.
			require.Equal(t, test.wantUpdate, filter.Update(&unrelated, &test.secret))
			require.Equal(t, test.wantUpdate, filter.Update(&test.secret, &unrelated))
			require.Equal(t, test.wantDelete, filter.Delete(&test.secret))
			require.Equal(t, test.wantParent, filter.Parent(&test.secret))
		})
	}
}
|
||||
|
||||
// TestJWKSControllerFilterOPC verifies that the JWKS controller's OIDCProviderConfig informer
// filter accepts every OPC event (add/update/delete) and uses the OPC itself as the parent key.
func TestJWKSControllerFilterOPC(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name       string
		opc        configv1alpha1.OIDCProviderConfig
		wantAdd    bool
		wantUpdate bool
		wantDelete bool
		wantParent controllerlib.Key
	}{
		{
			// Even a zero-value OPC passes the filter.
			name:       "anything goes",
			opc:        configv1alpha1.OIDCProviderConfig{},
			wantAdd:    true,
			wantUpdate: true,
			wantDelete: true,
			wantParent: controllerlib.Key{},
		},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			secretInformer := kubeinformers.NewSharedInformerFactory(
				kubernetesfake.NewSimpleClientset(),
				0,
			).Core().V1().Secrets()
			opcInformer := pinnipedinformers.NewSharedInformerFactory(
				pinnipedfake.NewSimpleClientset(),
				0,
			).Config().V1alpha1().OIDCProviderConfigs()
			withInformer := testutil.NewObservableWithInformerOption()
			_ = NewJWKSController(
				nil, // kubeClient, not needed
				nil, // pinnipedClient, not needed
				secretInformer,
				opcInformer,
				withInformer.WithInformer,
			)

			unrelated := configv1alpha1.OIDCProviderConfig{}
			filter := withInformer.GetFilterForInformer(opcInformer)
			require.Equal(t, test.wantAdd, filter.Add(&test.opc))
			require.Equal(t, test.wantUpdate, filter.Update(&unrelated, &test.opc))
			require.Equal(t, test.wantUpdate, filter.Update(&test.opc, &unrelated))
			require.Equal(t, test.wantDelete, filter.Delete(&test.opc))
			require.Equal(t, test.wantParent, filter.Parent(&test.opc))
		})
	}
}
|
||||
|
||||
// TestJWKSControllerSync exercises the controller's Sync against fake Kube/pinniped clients:
// for each case it seeds secrets and OPCs, runs one sync, and asserts the exact client actions
// (get/create/update), how many times the key-generation hook ran, and any returned error.
func TestJWKSControllerSync(t *testing.T) {
	// We shouldn't run this test in parallel since it messes with a global function (generateKey).

	const namespace = "tuna-namespace"

	// Load a fixed EC private key fixture so every generated secret is deterministic.
	goodKeyPEM, err := ioutil.ReadFile("testdata/good-ec-key.pem")
	require.NoError(t, err)
	block, _ := pem.Decode(goodKeyPEM)
	require.NotNil(t, block, "expected block to be non-nil...is goodKeyPEM a valid PEM?")
	goodKey, err := x509.ParseECPrivateKey(block.Bytes)
	require.NoError(t, err)

	opcGVR := schema.GroupVersionResource{
		Group:    configv1alpha1.SchemeGroupVersion.Group,
		Version:  configv1alpha1.SchemeGroupVersion.Version,
		Resource: "oidcproviderconfigs",
	}

	goodOPC := &configv1alpha1.OIDCProviderConfig{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "good-opc",
			Namespace: namespace,
			UID:       "good-opc-uid",
		},
		Spec: configv1alpha1.OIDCProviderConfigSpec{
			Issuer: "https://some-issuer.com",
		},
	}
	// goodOPCWithStatus is goodOPC after the controller has recorded its JWKS secret name.
	goodOPCWithStatus := goodOPC.DeepCopy()
	goodOPCWithStatus.Status.JWKSSecret.Name = goodOPCWithStatus.Name + "-jwks"

	secretGVR := schema.GroupVersionResource{
		Group:    corev1.SchemeGroupVersion.Group,
		Version:  corev1.SchemeGroupVersion.Version,
		Resource: "secrets",
	}

	// newSecret builds a JWKS secret owned by goodOPC, filling data entries from the given
	// fixture paths (an empty path leaves that entry out).
	newSecret := func(activeJWKPath, jwksPath string) *corev1.Secret {
		s := corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      goodOPCWithStatus.Status.JWKSSecret.Name,
				Namespace: namespace,
				OwnerReferences: []metav1.OwnerReference{
					{
						APIVersion:         opcGVR.GroupVersion().String(),
						Kind:               "OIDCProviderConfig",
						Name:               goodOPC.Name,
						UID:                goodOPC.UID,
						BlockOwnerDeletion: boolPtr(true),
						Controller:         boolPtr(true),
					},
				},
			},
		}
		s.Data = make(map[string][]byte)
		if activeJWKPath != "" {
			s.Data["activeJWK"] = read(t, activeJWKPath)
		}
		if jwksPath != "" {
			s.Data["jwks"] = read(t, jwksPath)
		}
		return &s
	}

	goodSecret := newSecret("testdata/good-jwk.json", "testdata/good-jwks.json")

	tests := []struct {
		name                 string
		key                  controllerlib.Key
		secrets              []*corev1.Secret
		configKubeClient     func(*kubernetesfake.Clientset)
		configPinnipedClient func(*pinnipedfake.Clientset)
		opcs                 []*configv1alpha1.OIDCProviderConfig
		generateKeyErr       error
		wantGenerateKeyCount int
		wantSecretActions    []kubetesting.Action
		wantOPCActions       []kubetesting.Action
		wantError            string
	}{
		{
			name: "new opc with no secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPC,
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewCreateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
				kubetesting.NewUpdateAction(opcGVR, namespace, goodOPCWithStatus),
			},
		},
		{
			name: "opc without status with existing secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPC,
			},
			secrets: []*corev1.Secret{
				goodSecret,
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
				kubetesting.NewUpdateAction(opcGVR, namespace, goodOPCWithStatus),
			},
		},
		{
			name: "existing opc with no secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewCreateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "existing opc with existing secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				goodSecret,
			},
		},
		{
			name: "deleted opc",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			// Nothing to do here since Kube will garbage collect our child secret via its OwnerReference.
		},
		{
			name: "missing jwk in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("", "testdata/good-jwks.json"),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "missing jwks in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("testdata/good-jwk.json", ""),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "invalid jwk JSON in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("testdata/not-json.txt", "testdata/good-jwks.json"),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "invalid jwks JSON in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("testdata/good-jwk.json", "testdata/not-json.txt"),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "public jwk in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("testdata/public-jwk.json", "testdata/good-jwks.json"),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "private jwks in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("testdata/good-jwk.json", "testdata/private-jwks.json"),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "invalid jwk key in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("testdata/invalid-key-jwk.json", "testdata/good-jwks.json"),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "invalid jwks key in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("testdata/good-jwk.json", "testdata/invalid-key-jwks.json"),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "missing active jwks in secret",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			secrets: []*corev1.Secret{
				newSecret("testdata/good-jwk.json", "testdata/missing-active-jwks.json"),
			},
			wantGenerateKeyCount: 1,
			wantSecretActions: []kubetesting.Action{
				kubetesting.NewGetAction(secretGVR, namespace, goodSecret.Name),
				kubetesting.NewUpdateAction(secretGVR, namespace, goodSecret),
			},
			wantOPCActions: []kubetesting.Action{
				kubetesting.NewGetAction(opcGVR, namespace, goodOPC.Name),
			},
		},
		{
			name: "generate key fails",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPCWithStatus,
			},
			generateKeyErr: errors.New("some generate error"),
			wantError:      "cannot generate secret: cannot generate key: some generate error",
		},
		{
			name: "get secret fails",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPC,
			},
			configKubeClient: func(client *kubernetesfake.Clientset) {
				client.PrependReactor("get", "secrets", func(_ kubetesting.Action) (bool, runtime.Object, error) {
					return true, nil, errors.New("some get error")
				})
			},
			wantError: "cannot create or update secret: cannot get secret: some get error",
		},
		{
			name: "create secret fails",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPC,
			},
			configKubeClient: func(client *kubernetesfake.Clientset) {
				client.PrependReactor("create", "secrets", func(_ kubetesting.Action) (bool, runtime.Object, error) {
					return true, nil, errors.New("some create error")
				})
			},
			wantError: "cannot create or update secret: cannot create secret: some create error",
		},
		{
			name: "update secret fails",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPC,
			},
			secrets: []*corev1.Secret{
				newSecret("", ""),
			},
			configKubeClient: func(client *kubernetesfake.Clientset) {
				client.PrependReactor("update", "secrets", func(_ kubetesting.Action) (bool, runtime.Object, error) {
					return true, nil, errors.New("some update error")
				})
			},
			wantError: "cannot create or update secret: some update error",
		},
		{
			name: "get opc fails",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPC,
			},
			configPinnipedClient: func(client *pinnipedfake.Clientset) {
				client.PrependReactor("get", "oidcproviderconfigs", func(_ kubetesting.Action) (bool, runtime.Object, error) {
					return true, nil, errors.New("some get error")
				})
			},
			wantError: "cannot update opc: cannot get opc: some get error",
		},
		{
			name: "update opc fails",
			key:  controllerlib.Key{Namespace: goodOPC.Namespace, Name: goodOPC.Name},
			opcs: []*configv1alpha1.OIDCProviderConfig{
				goodOPC,
			},
			configPinnipedClient: func(client *pinnipedfake.Clientset) {
				client.PrependReactor("update", "oidcproviderconfigs", func(_ kubetesting.Action) (bool, runtime.Object, error) {
					return true, nil, errors.New("some update error")
				})
			},
			wantError: "cannot update opc: some update error",
		},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			// We shouldn't run this test in parallel since it messes with a global function (generateKey).
			generateKeyCount := 0
			generateKey = func(_ io.Reader) (interface{}, error) {
				generateKeyCount++
				return goodKey, test.generateKeyErr
			}

			ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
			defer cancel()

			// Separate clients back the API and the informers so we can assert only the
			// actions performed by the controller against the API client.
			kubeAPIClient := kubernetesfake.NewSimpleClientset()
			kubeInformerClient := kubernetesfake.NewSimpleClientset()
			for _, secret := range test.secrets {
				require.NoError(t, kubeAPIClient.Tracker().Add(secret))
				require.NoError(t, kubeInformerClient.Tracker().Add(secret))
			}
			if test.configKubeClient != nil {
				test.configKubeClient(kubeAPIClient)
			}

			pinnipedAPIClient := pinnipedfake.NewSimpleClientset()
			pinnipedInformerClient := pinnipedfake.NewSimpleClientset()
			for _, opc := range test.opcs {
				require.NoError(t, pinnipedAPIClient.Tracker().Add(opc))
				require.NoError(t, pinnipedInformerClient.Tracker().Add(opc))
			}
			if test.configPinnipedClient != nil {
				test.configPinnipedClient(pinnipedAPIClient)
			}

			kubeInformers := kubeinformers.NewSharedInformerFactory(
				kubeInformerClient,
				0,
			)
			pinnipedInformers := pinnipedinformers.NewSharedInformerFactory(
				pinnipedInformerClient,
				0,
			)

			c := NewJWKSController(
				kubeAPIClient,
				pinnipedAPIClient,
				kubeInformers.Core().V1().Secrets(),
				pinnipedInformers.Config().V1alpha1().OIDCProviderConfigs(),
				controllerlib.WithInformer,
			)

			// Must start informers before calling TestRunSynchronously().
			kubeInformers.Start(ctx.Done())
			pinnipedInformers.Start(ctx.Done())
			controllerlib.TestRunSynchronously(t, c)

			err := controllerlib.TestSync(t, c, controllerlib.Context{
				Context: ctx,
				Key:     test.key,
			})
			if test.wantError != "" {
				require.EqualError(t, err, test.wantError)
				return
			}
			require.NoError(t, err)

			require.Equal(t, test.wantGenerateKeyCount, generateKeyCount)

			if test.wantSecretActions != nil {
				require.Equal(t, test.wantSecretActions, kubeAPIClient.Actions())
			}
			if test.wantOPCActions != nil {
				require.Equal(t, test.wantOPCActions, pinnipedAPIClient.Actions())
			}
		})
	}
}
|
||||
|
||||
func read(t *testing.T, path string) []byte {
|
||||
t.Helper()
|
||||
|
||||
data, err := ioutil.ReadFile(path)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Trim whitespace from our testdata so that we match the compact JSON encoding of
|
||||
// our implementation.
|
||||
data = bytes.ReplaceAll(data, []byte(" "), []byte{})
|
||||
data = bytes.ReplaceAll(data, []byte("\n"), []byte{})
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
// boolPtr returns a pointer to a bool holding the given value.
func boolPtr(b bool) *bool {
	v := b
	return &v
}
|
5
internal/controller/supervisorconfig/testdata/good-ec-key.pem
vendored
Normal file
5
internal/controller/supervisorconfig/testdata/good-ec-key.pem
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MHcCAQEEINR2PAduYBO64CaDT4vLoMnn8y4UX5VTFdOA7wUQF0n/oAoGCCqGSM49
|
||||
AwEHoUQDQgAEawmmj6CIMhSoJyfsqH7sekbTeY72GGPLEy16tPWVz2UVwyHTq5ct
|
||||
qr1vYw6LGUtWJ1STJw7W7sgc6StOLs3RrA==
|
||||
-----END EC PRIVATE KEY-----
|
10
internal/controller/supervisorconfig/testdata/good-jwk.json
vendored
Normal file
10
internal/controller/supervisorconfig/testdata/good-jwk.json
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
{
|
||||
"use": "sig",
|
||||
"kty": "EC",
|
||||
"kid": "pinniped-supervisor-key",
|
||||
"crv": "P-256",
|
||||
"alg": "ES256",
|
||||
"x": "awmmj6CIMhSoJyfsqH7sekbTeY72GGPLEy16tPWVz2U",
|
||||
"y": "FcMh06uXLaq9b2MOixlLVidUkycO1u7IHOkrTi7N0aw",
|
||||
"d": "1HY8B25gE7rgJoNPi8ugyefzLhRflVMV04DvBRAXSf8"
|
||||
}
|
13
internal/controller/supervisorconfig/testdata/good-jwks.json
vendored
Normal file
13
internal/controller/supervisorconfig/testdata/good-jwks.json
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
{
|
||||
"keys": [
|
||||
{
|
||||
"use": "sig",
|
||||
"kty": "EC",
|
||||
"kid": "pinniped-supervisor-key",
|
||||
"crv": "P-256",
|
||||
"alg": "ES256",
|
||||
"x": "awmmj6CIMhSoJyfsqH7sekbTeY72GGPLEy16tPWVz2U",
|
||||
"y": "FcMh06uXLaq9b2MOixlLVidUkycO1u7IHOkrTi7N0aw"
|
||||
}
|
||||
]
|
||||
}
|
10
internal/controller/supervisorconfig/testdata/invalid-key-jwk.json
vendored
Normal file
10
internal/controller/supervisorconfig/testdata/invalid-key-jwk.json
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
{
|
||||
"use": "sig",
|
||||
"kty": "EC",
|
||||
"kid": "pinniped-supervisor-key",
|
||||
"crv": "P-256",
|
||||
"alg": "ES256",
|
||||
"x": "0",
|
||||
"y": "FcMh06uXLaq9b2MOixlLVidUkycO1u7IHOkrTi7N0aw",
|
||||
"d": "1HY8B25gE7rgJoNPi8ugyefzLhRflVMV04DvBRAXSf8"
|
||||
}
|
13
internal/controller/supervisorconfig/testdata/invalid-key-jwks.json
vendored
Normal file
13
internal/controller/supervisorconfig/testdata/invalid-key-jwks.json
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
{
|
||||
"keys": [
|
||||
{
|
||||
"use": "sig",
|
||||
"kty": "EC",
|
||||
"kid": "pinniped-supervisor-key",
|
||||
"crv": "P-256",
|
||||
"alg": "ES256",
|
||||
"x": "awmmj6CIMhSoJyfsqH7sekbTeY72GGPLEy16tPWVz2U",
|
||||
"y": "0"
|
||||
}
|
||||
]
|
||||
}
|
13
internal/controller/supervisorconfig/testdata/missing-active-jwks.json
vendored
Normal file
13
internal/controller/supervisorconfig/testdata/missing-active-jwks.json
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
{
|
||||
"keys": [
|
||||
{
|
||||
"use": "sig",
|
||||
"kty": "EC",
|
||||
"kid": "some-other-key",
|
||||
"crv": "P-256",
|
||||
"alg": "ES256",
|
||||
"x": "awmmj6CIMhSoJyfsqH7sekbTeY72GGPLEy16tPWVz2U",
|
||||
"y": "0"
|
||||
}
|
||||
]
|
||||
}
|
1
internal/controller/supervisorconfig/testdata/not-json.txt
vendored
Normal file
1
internal/controller/supervisorconfig/testdata/not-json.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
this ain't json
|
14
internal/controller/supervisorconfig/testdata/private-jwks.json
vendored
Normal file
14
internal/controller/supervisorconfig/testdata/private-jwks.json
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
{
|
||||
"keys": [
|
||||
{
|
||||
"use": "sig",
|
||||
"kty": "EC",
|
||||
"kid": "pinniped-supervisor-key",
|
||||
"crv": "P-256",
|
||||
"alg": "ES256",
|
||||
"x": "awmmj6CIMhSoJyfsqH7sekbTeY72GGPLEy16tPWVz2U",
|
||||
"y": "FcMh06uXLaq9b2MOixlLVidUkycO1u7IHOkrTi7N0aw",
|
||||
"d": "1HY8B25gE7rgJoNPi8ugyefzLhRflVMV04DvBRAXSf8"
|
||||
}
|
||||
]
|
||||
}
|
9
internal/controller/supervisorconfig/testdata/public-jwk.json
vendored
Normal file
9
internal/controller/supervisorconfig/testdata/public-jwk.json
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
{
|
||||
"use": "sig",
|
||||
"kty": "EC",
|
||||
"kid": "pinniped-supervisor-key",
|
||||
"crv": "P-256",
|
||||
"alg": "ES256",
|
||||
"x": "awmmj6CIMhSoJyfsqH7sekbTeY72GGPLEy16tPWVz2U",
|
||||
"y": "FcMh06uXLaq9b2MOixlLVidUkycO1u7IHOkrTi7N0aw"
|
||||
}
|
@ -13,7 +13,6 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
|
||||
@ -63,14 +62,14 @@ func TestSupervisorOIDCDiscovery(t *testing.T) {
|
||||
badIssuer := fmt.Sprintf("http://%s/badIssuer?cannot-use=queries", env.SupervisorAddress)
|
||||
|
||||
// When OIDCProviderConfig are created in sequence they each cause a discovery endpoint to appear only for as long as the OIDCProviderConfig exists.
|
||||
config1 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(t, client, ns, issuer1, "from-integration-test1")
|
||||
config1 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(ctx, t, issuer1, client)
|
||||
requireDeletingOIDCProviderConfigCausesWellKnownEndpointToDisappear(t, config1, client, ns, issuer1)
|
||||
config2 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(t, client, ns, issuer2, "from-integration-test2")
|
||||
config2 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(ctx, t, issuer2, client)
|
||||
requireDeletingOIDCProviderConfigCausesWellKnownEndpointToDisappear(t, config2, client, ns, issuer2)
|
||||
|
||||
// When multiple OIDCProviderConfigs exist at the same time they each serve a unique discovery endpoint.
|
||||
config3 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(t, client, ns, issuer3, "from-integration-test3")
|
||||
config4 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(t, client, ns, issuer4, "from-integration-test4")
|
||||
config3 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(ctx, t, issuer3, client)
|
||||
config4 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(ctx, t, issuer4, client)
|
||||
requireWellKnownEndpointIsWorking(t, issuer3) // discovery for issuer3 is still working after issuer4 started working
|
||||
|
||||
// When they are deleted they stop serving discovery endpoints.
|
||||
@ -78,8 +77,8 @@ func TestSupervisorOIDCDiscovery(t *testing.T) {
|
||||
requireDeletingOIDCProviderConfigCausesWellKnownEndpointToDisappear(t, config4, client, ns, issuer4)
|
||||
|
||||
// When the same issuer is added twice, both issuers are marked as duplicates, and neither provider is serving.
|
||||
config5Duplicate1 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(t, client, ns, issuer5, "from-integration-test5")
|
||||
config5Duplicate2 := createOIDCProviderConfig(t, "from-integration-test5-duplicate", client, ns, issuer5)
|
||||
config5Duplicate1 := requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(ctx, t, issuer5, client)
|
||||
config5Duplicate2 := library.CreateTestOIDCProvider(ctx, t, issuer5)
|
||||
requireStatus(t, client, ns, config5Duplicate1.Name, v1alpha1.DuplicateOIDCProviderStatus)
|
||||
requireStatus(t, client, ns, config5Duplicate2.Name, v1alpha1.DuplicateOIDCProviderStatus)
|
||||
requireDiscoveryEndpointIsNotFound(t, issuer5)
|
||||
@ -93,7 +92,7 @@ func TestSupervisorOIDCDiscovery(t *testing.T) {
|
||||
requireDeletingOIDCProviderConfigCausesWellKnownEndpointToDisappear(t, config5Duplicate2, client, ns, issuer5)
|
||||
|
||||
// When we create a provider with an invalid issuer, the status is set to invalid.
|
||||
badConfig := createOIDCProviderConfig(t, "from-integration-test6", client, ns, badIssuer)
|
||||
badConfig := library.CreateTestOIDCProvider(ctx, t, badIssuer)
|
||||
requireStatus(t, client, ns, badConfig.Name, v1alpha1.InvalidOIDCProviderStatus)
|
||||
requireDiscoveryEndpointIsNotFound(t, badIssuer)
|
||||
}
|
||||
@ -122,11 +121,16 @@ func requireDiscoveryEndpointIsNotFound(t *testing.T, issuerName string) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(t *testing.T, client pinnipedclientset.Interface, ns string, issuerName string, oidcProviderConfigName string) *v1alpha1.OIDCProviderConfig {
|
||||
func requireCreatingOIDCProviderConfigCausesWellKnownEndpointToAppear(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
issuerName string,
|
||||
client pinnipedclientset.Interface,
|
||||
) *v1alpha1.OIDCProviderConfig {
|
||||
t.Helper()
|
||||
newOIDCProviderConfig := createOIDCProviderConfig(t, oidcProviderConfigName, client, ns, issuerName)
|
||||
newOIDCProviderConfig := library.CreateTestOIDCProvider(ctx, t, issuerName)
|
||||
requireWellKnownEndpointIsWorking(t, issuerName)
|
||||
requireStatus(t, client, ns, oidcProviderConfigName, v1alpha1.SuccessOIDCProviderStatus)
|
||||
requireStatus(t, client, newOIDCProviderConfig.Namespace, newOIDCProviderConfig.Name, v1alpha1.SuccessOIDCProviderStatus)
|
||||
return newOIDCProviderConfig
|
||||
}
|
||||
|
||||
@ -192,43 +196,6 @@ func requireWellKnownEndpointIsWorking(t *testing.T, issuerName string) {
|
||||
require.JSONEq(t, expectedJSON, string(responseBody))
|
||||
}
|
||||
|
||||
func createOIDCProviderConfig(t *testing.T, oidcProviderConfigName string, client pinnipedclientset.Interface, ns string, issuerName string) *v1alpha1.OIDCProviderConfig {
|
||||
t.Helper()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
newOIDCProviderConfig := v1alpha1.OIDCProviderConfig{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "OIDCProviderConfig",
|
||||
APIVersion: v1alpha1.SchemeGroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: oidcProviderConfigName,
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: v1alpha1.OIDCProviderConfigSpec{
|
||||
Issuer: issuerName,
|
||||
},
|
||||
}
|
||||
createdOIDCProviderConfig, err := client.ConfigV1alpha1().OIDCProviderConfigs(ns).Create(ctx, &newOIDCProviderConfig, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// When this test has finished, be sure to clean up the new OIDCProviderConfig.
|
||||
t.Cleanup(func() {
|
||||
cleanupCtx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
err = client.ConfigV1alpha1().OIDCProviderConfigs(ns).Delete(cleanupCtx, newOIDCProviderConfig.Name, metav1.DeleteOptions{})
|
||||
notFound := k8serrors.IsNotFound(err)
|
||||
// It's okay if it is not found, because it might have been deleted by another part of this test.
|
||||
if !notFound {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
return createdOIDCProviderConfig
|
||||
}
|
||||
|
||||
func requireDelete(t *testing.T, client pinnipedclientset.Interface, ns, name string) {
|
||||
t.Helper()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
95
test/integration/supervisor_keys_test.go
Normal file
95
test/integration/supervisor_keys_test.go
Normal file
@ -0,0 +1,95 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/square/go-jose.v2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
|
||||
"go.pinniped.dev/test/library"
|
||||
)
|
||||
|
||||
func TestSupervisorOIDCKeys(t *testing.T) {
|
||||
env := library.IntegrationEnv(t)
|
||||
kubeClient := library.NewClientset(t)
|
||||
pinnipedClient := library.NewPinnipedClientset(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// Create our OPC under test.
|
||||
opc := library.CreateTestOIDCProvider(ctx, t, "")
|
||||
|
||||
// Ensure a secret is created with the OPC's JWKS.
|
||||
var updatedOPC *configv1alpha1.OIDCProviderConfig
|
||||
var err error
|
||||
assert.Eventually(t, func() bool {
|
||||
updatedOPC, err = pinnipedClient.
|
||||
ConfigV1alpha1().
|
||||
OIDCProviderConfigs(env.SupervisorNamespace).
|
||||
Get(ctx, opc.Name, metav1.GetOptions{})
|
||||
return err == nil && updatedOPC.Status.JWKSSecret.Name != ""
|
||||
}, time.Second*10, time.Millisecond*500)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, updatedOPC.Status.JWKSSecret.Name)
|
||||
|
||||
// Ensure the secret actually exists.
|
||||
secret, err := kubeClient.
|
||||
CoreV1().
|
||||
Secrets(env.SupervisorNamespace).
|
||||
Get(ctx, updatedOPC.Status.JWKSSecret.Name, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Ensure the secret has an active key.
|
||||
jwkData, ok := secret.Data["activeJWK"]
|
||||
require.True(t, ok, "secret is missing active jwk")
|
||||
|
||||
// Ensure the secret's active key is valid.
|
||||
var activeJWK jose.JSONWebKey
|
||||
require.NoError(t, json.Unmarshal(jwkData, &activeJWK))
|
||||
require.True(t, activeJWK.Valid(), "active jwk is invalid")
|
||||
require.False(t, activeJWK.IsPublic(), "active jwk is public")
|
||||
|
||||
// Ensure the secret has a JWKS.
|
||||
jwksData, ok := secret.Data["jwks"]
|
||||
require.True(t, ok, "secret is missing jwks")
|
||||
|
||||
// Ensure the secret's JWKS is valid, public, and contains the singing key.
|
||||
var jwks jose.JSONWebKeySet
|
||||
require.NoError(t, json.Unmarshal(jwksData, &jwks))
|
||||
foundActiveJWK := false
|
||||
for _, jwk := range jwks.Keys {
|
||||
require.Truef(t, jwk.Valid(), "jwk %s is invalid", jwk.KeyID)
|
||||
require.Truef(t, jwk.IsPublic(), "jwk %s is not public", jwk.KeyID)
|
||||
if jwk.KeyID == activeJWK.KeyID {
|
||||
foundActiveJWK = true
|
||||
}
|
||||
}
|
||||
require.True(t, foundActiveJWK, "could not find active JWK in JWKS: %s", jwks)
|
||||
|
||||
// Ensure upon deleting the secret, it is eventually brought back.
|
||||
err = kubeClient.
|
||||
CoreV1().
|
||||
Secrets(env.SupervisorNamespace).
|
||||
Delete(ctx, updatedOPC.Status.JWKSSecret.Name, metav1.DeleteOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Eventually(t, func() bool {
|
||||
secret, err = kubeClient.
|
||||
CoreV1().
|
||||
Secrets(env.SupervisorNamespace).
|
||||
Get(ctx, updatedOPC.Status.JWKSSecret.Name, metav1.GetOptions{})
|
||||
return err == nil
|
||||
}, time.Second*10, time.Millisecond*500)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Upon deleting the OPC, the secret is deleted (we test this behavior in our uninstall tests).
|
||||
}
|
@ -5,6 +5,10 @@ package library
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
@ -12,12 +16,14 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
|
||||
idpv1alpha1 "go.pinniped.dev/generated/1.19/apis/idp/v1alpha1"
|
||||
pinnipedclientset "go.pinniped.dev/generated/1.19/client/clientset/versioned"
|
||||
|
||||
@ -152,3 +158,60 @@ func CreateTestWebhookIDP(ctx context.Context, t *testing.T) corev1.TypedLocalOb
|
||||
Name: idp.Name,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateTestOIDCProvider creates and returns a test OIDCProviderConfig in
|
||||
// $PINNIPED_TEST_SUPERVISOR_NAMESPACE, which will be automatically deleted at the end of the
|
||||
// current test's lifetime. It generates a random, valid, issuer for the OIDCProviderConfig.
|
||||
//
|
||||
// If the provided issuer is not the empty string, then it will be used for the
|
||||
// OIDCProviderConfig.Spec.Issuer field. Else, a random issuer will be generated.
|
||||
func CreateTestOIDCProvider(ctx context.Context, t *testing.T, issuer string) *configv1alpha1.OIDCProviderConfig {
|
||||
t.Helper()
|
||||
testEnv := IntegrationEnv(t)
|
||||
|
||||
createContext, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if issuer == "" {
|
||||
var err error
|
||||
issuer, err = randomIssuer()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
opcs := NewPinnipedClientset(t).ConfigV1alpha1().OIDCProviderConfigs(testEnv.SupervisorNamespace)
|
||||
opc, err := opcs.Create(createContext, &configv1alpha1.OIDCProviderConfig{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "test-oidc-provider-",
|
||||
Labels: map[string]string{"pinniped.dev/test": ""},
|
||||
Annotations: map[string]string{"pinniped.dev/testName": t.Name()},
|
||||
},
|
||||
Spec: configv1alpha1.OIDCProviderConfigSpec{
|
||||
Issuer: issuer,
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
require.NoError(t, err, "could not create test OIDCProviderConfig")
|
||||
t.Logf("created test OIDCProviderConfig %s/%s", opc.Namespace, opc.Name)
|
||||
|
||||
t.Cleanup(func() {
|
||||
t.Helper()
|
||||
t.Logf("cleaning up test OIDCProviderConfig %s/%s", opc.Namespace, opc.Name)
|
||||
deleteCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
err := opcs.Delete(deleteCtx, opc.Name, metav1.DeleteOptions{})
|
||||
notFound := k8serrors.IsNotFound(err)
|
||||
// It's okay if it is not found, because it might have been deleted by another part of this test.
|
||||
if !notFound {
|
||||
require.NoErrorf(t, err, "could not cleanup test OIDCProviderConfig %s/%s", opc.Namespace, opc.Name)
|
||||
}
|
||||
})
|
||||
|
||||
return opc
|
||||
}
|
||||
|
||||
func randomIssuer() (string, error) {
|
||||
var buf [8]byte
|
||||
if _, err := io.ReadFull(rand.Reader, buf[:]); err != nil {
|
||||
return "", fmt.Errorf("could not generate random state: %w", err)
|
||||
}
|
||||
return fmt.Sprintf("http://test-issuer-%s.pinniped.dev", hex.EncodeToString(buf[:])), nil
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user