Merge branch 'main' into oidc-upstream-watcher-supports-proxy
commit 9f91c6c884
@@ -102,6 +102,15 @@ spec:
             protocol: TCP
           - containerPort: 8443
             protocol: TCP
+          env:
+          #@ if data.values.https_proxy:
+          - name: HTTPS_PROXY
+            value: #@ data.values.https_proxy
+          #@ end
+          #@ if data.values.no_proxy:
+          - name: NO_PROXY
+            value: #@ data.values.no_proxy
+          #@ end
           livenessProbe:
             httpGet:
               path: /healthz
@@ -65,3 +65,11 @@ run_as_group: 1001 #! run_as_group specifies the group ID that will own the proc
 #! authentication.concierge.pinniped.dev, etc. As an example, if this is set to tuna.io, then
 #! Pinniped API groups will look like foo.tuna.io. authentication.concierge.tuna.io, etc.
 api_group_suffix: pinniped.dev
+
+#! Set the standard golang HTTPS_PROXY and NO_PROXY environment variables on the Supervisor containers.
+#! These will be used when the Supervisor makes backend-to-backend calls to upstream identity providers using HTTPS,
+#! e.g. when the Supervisor fetches discovery documents, JWKS keys, and tokens from an upstream OIDC Provider.
+#! The Supervisor never makes insecure HTTP calls, so there is no reason to set HTTP_PROXY.
+#! Optional.
+https_proxy: #! e.g. http://proxy.example.com
+no_proxy: #! e.g. 127.0.0.1
@@ -263,7 +263,9 @@ func (c *oidcWatcherController) validateIssuer(ctx context.Context, upstream *v1
 			Message: err.Error(),
 		}
 	}
 
 	httpClient = &http.Client{
+		Timeout: time.Minute,
 		Transport: &http.Transport{
 			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: tlsConfig,
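For context on the hunk above: http.ProxyFromEnvironment consults the standard HTTPS_PROXY and NO_PROXY environment variables (the ones the new deployment env block injects into the Supervisor container), so no Pinniped-specific plumbing is needed beyond setting those variables. The following standalone sketch is illustrative only and is not part of this commit; the proxy and issuer hostnames are placeholders (the proxy values mirror the examples in the values.yaml comments), and note that Go reads these variables only once per process, so they must be set before the first request.

// Standalone sketch, not part of this commit.
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

func main() {
	// Placeholder values, mirroring the examples in the values.yaml comments.
	os.Setenv("HTTPS_PROXY", "http://proxy.example.com")
	os.Setenv("NO_PROXY", "127.0.0.1")

	// Built the same way as the controller's httpClient in the hunk above.
	client := &http.Client{
		Timeout: time.Minute,
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
		},
	}

	// Ask the Transport's Proxy func which proxy would be used for an HTTPS request.
	// (http.ProxyFromEnvironment reads the environment only once per process.)
	req, err := http.NewRequest(http.MethodGet, "https://issuer.example.com/.well-known/openid-configuration", nil)
	if err != nil {
		panic(err)
	}
	proxyURL, err := client.Transport.(*http.Transport).Proxy(req)
	if err != nil {
		panic(err)
	}
	fmt.Println("would use proxy:", proxyURL) // prints http://proxy.example.com
}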
@@ -9,6 +9,7 @@ import (
 	"encoding/json"
 	"net/http"
 	"net/url"
+	"reflect"
 	"strings"
 	"testing"
 	"time"
@@ -797,6 +798,17 @@ oidc: issuer did not match the issuer returned by provider, expected "` + testIs
 				require.Equal(t, tt.wantResultingCache[i].GetUsernameClaim(), actualIDP.GetUsernameClaim())
 				require.Equal(t, tt.wantResultingCache[i].GetGroupsClaim(), actualIDP.GetGroupsClaim())
 				require.ElementsMatch(t, tt.wantResultingCache[i].GetScopes(), actualIDP.GetScopes())
+
+				// We always want to use the proxy from env on these clients, so although the following assertions
+				// are a little hacky, this is a cheap way to test that we are using it.
+				actualTransport, ok := actualIDP.Client.Transport.(*http.Transport)
+				require.True(t, ok, "expected cached provider to have client with Transport of type *http.Transport")
+				httpProxyFromEnvFunction := reflect.ValueOf(http.ProxyFromEnvironment).Pointer()
+				actualTransportProxyFunction := reflect.ValueOf(actualTransport.Proxy).Pointer()
+				require.Equal(t, httpProxyFromEnvFunction, actualTransportProxyFunction,
+					"Transport should have used http.ProxyFromEnvironment as its Proxy func")
+				// We also want a reasonable timeout on each request/response cycle for OIDC discovery and JWKS.
+				require.Equal(t, time.Minute, actualIDP.Client.Timeout)
 			}
 
 			actualUpstreams, err := fakePinnipedClient.IDPV1alpha1().OIDCIdentityProviders(testNamespace).List(ctx, metav1.ListOptions{})
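The pointer comparison added above works because Go function values cannot be compared with ==; reflect.ValueOf(fn).Pointer() yields the underlying code pointer, so two references to the same top-level function compare equal. A minimal standalone sketch of the same trick, not part of this commit:

package main

import (
	"fmt"
	"net/http"
	"reflect"
)

func main() {
	transport := &http.Transport{Proxy: http.ProxyFromEnvironment}

	// Compare code pointers rather than function values, which are not comparable with ==.
	want := reflect.ValueOf(http.ProxyFromEnvironment).Pointer()
	got := reflect.ValueOf(transport.Proxy).Pointer()

	fmt.Println(want == got) // true: the Transport was configured with http.ProxyFromEnvironment
}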
@@ -1,6 +1,9 @@
 # Copyright 2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
+# The name of the cluster type.
+kubernetesDistribution: AKS
+
 # Describe the capabilities of the cluster against which the integration tests will run.
 capabilities:
 
@@ -1,6 +1,9 @@
 # Copyright 2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
+# The name of the cluster type.
+kubernetesDistribution: EKS
+
 # Describe the capabilities of the cluster against which the integration tests will run.
 capabilities:
 
@@ -1,6 +1,9 @@
 # Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
+# The name of the cluster type.
+kubernetesDistribution: GKE
+
 # Describe the capabilities of the cluster against which the integration tests will run.
 capabilities:
 
@@ -1,6 +1,9 @@
 # Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
+# The name of the cluster type.
+kubernetesDistribution: Kind
+
 # Describe the capabilities of the cluster against which the integration tests will run.
 capabilities:
 
@@ -1,6 +1,9 @@
 # Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
+# The name of the cluster type.
+kubernetesDistribution: TKGS
+
 # Describe the capabilities of the cluster against which the integration tests will run.
 capabilities:
 
@@ -15,4 +18,4 @@ capabilities:
   anonymousAuthenticationSupported: true
 
   # Are LDAP ports on the Internet reachable without interference from network firewalls or proxies?
-  canReachInternetLDAPPorts: false
+  canReachInternetLDAPPorts: true
@@ -111,7 +111,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 				var err error
 				secret, err = kubeClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
 				requireEventually.NoError(err)
-			}, 10*time.Second, 250*time.Millisecond)
+			}, time.Minute, 250*time.Millisecond)
 			regeneratedCACert := secret.Data["caCertificate"]
 			regeneratedPrivateKey := secret.Data["tlsPrivateKey"]
 			regeneratedCertChain := secret.Data["tlsCertificateChain"]
@@ -131,7 +131,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 				apiService, err := aggregatedClient.ApiregistrationV1().APIServices().Get(ctx, apiServiceName, metav1.GetOptions{})
 				requireEventually.NoErrorf(err, "get for APIService %q returned error", apiServiceName)
 				requireEventually.Equalf(regeneratedCACert, apiService.Spec.CABundle, "CA bundle in APIService %q does not yet have the expected value", apiServiceName)
-			}, 10*time.Second, 250*time.Millisecond, "never saw CA certificate rotate to expected value")
+			}, time.Minute, 250*time.Millisecond, "never saw CA certificate rotate to expected value")
 
 			// Check that we can still make requests to the aggregated API through the kube API server,
 			// because the kube API server uses these certs when proxying requests to the aggregated API server,
@@ -150,7 +150,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 				}, metav1.CreateOptions{})
 				requireEventually.NoError(err, "dynamiccertificates.Notifier broken?")
 			}
-		}, 30*time.Second, 250*time.Millisecond)
+		}, time.Minute, 250*time.Millisecond)
 		})
 	}
 }
@@ -25,7 +25,11 @@ import (
 )
 
 func TestLDAPSearch(t *testing.T) {
-	env := testlib.IntegrationEnv(t)
+	// This test does not interact with Kubernetes itself. It is a test of our LDAP client code, and only interacts
+	// with our test OpenLDAP server, which is exposed directly to this test via kubectl port-forward.
+	// Theoretically we should always be able to run this test, but something about the kubectl port forwarding
+	// was very flaky on AKS, so we'll get the coverage by only running it on kind.
+	env := testlib.IntegrationEnv(t).WithKubeDistribution(testlib.KindDistro)
 
 	// Note that these tests depend on the values hard-coded in the LDIF file in test/deploy/tools/ldap.yaml.
 	// It requires the test LDAP server from the tools deployment.
@@ -613,7 +617,11 @@ func TestLDAPSearch(t *testing.T) {
 }
 
 func TestSimultaneousLDAPRequestsOnSingleProvider(t *testing.T) {
-	env := testlib.IntegrationEnv(t)
+	// This test does not interact with Kubernetes itself. It is a test of our LDAP client code, and only interacts
+	// with our test OpenLDAP server, which is exposed directly to this test via kubectl port-forward.
+	// Theoretically we should always be able to run this test, but something about the kubectl port forwarding
+	// was very flaky on AKS, so we'll get the coverage by only running it on kind.
+	env := testlib.IntegrationEnv(t).WithKubeDistribution(testlib.KindDistro)
 
 	// Note that these tests depend on the values hard-coded in the LDIF file in test/deploy/tools/ldap.yaml.
 	// It requires the test LDAP server from the tools deployment.
@@ -19,12 +19,19 @@ import (
 )
 
 type Capability string
+type KubeDistro string
 
 const (
 	ClusterSigningKeyIsAvailable     Capability = "clusterSigningKeyIsAvailable"
 	AnonymousAuthenticationSupported Capability = "anonymousAuthenticationSupported"
 	HasExternalLoadBalancerProvider  Capability = "hasExternalLoadBalancerProvider"
 	CanReachInternetLDAPPorts        Capability = "canReachInternetLDAPPorts"
+
+	KindDistro KubeDistro = "Kind"
+	GKEDistro  KubeDistro = "GKE"
+	AKSDistro  KubeDistro = "AKS"
+	EKSDistro  KubeDistro = "EKS"
+	TKGSDistro KubeDistro = "TKGS"
 )
 
 // TestEnv captures all the external parameters consumed by our integration tests.
@@ -38,6 +45,7 @@ type TestEnv struct {
 	SupervisorAppName      string                               `json:"supervisorAppName"`
 	SupervisorCustomLabels map[string]string                    `json:"supervisorCustomLabels"`
 	ConciergeCustomLabels  map[string]string                    `json:"conciergeCustomLabels"`
+	KubernetesDistribution KubeDistro                           `json:"kubernetesDistribution"`
 	Capabilities           map[Capability]bool                  `json:"capabilities"`
 	TestWebhook            auth1alpha1.WebhookAuthenticatorSpec `json:"testWebhook"`
 	SupervisorHTTPAddress  string                               `json:"supervisorHttpAddress"`
@@ -285,3 +293,16 @@ func (e *TestEnv) WithoutCapability(cap Capability) *TestEnv {
 	}
 	return e
 }
+
+// WithKubeDistribution skips the test unless it will run on the expected cluster type.
+// Please use this sparingly. We would prefer that a test run on every cluster type where it can possibly run, so
+// prefer to run everywhere when possible or use cluster capabilities when needed, rather than looking at the
+// type of cluster to decide to skip a test. However, there are some tests that do not depend on or interact with
+// Kubernetes itself which really only need to run on a single platform to give us the coverage that we desire.
+func (e *TestEnv) WithKubeDistribution(distro KubeDistro) *TestEnv {
+	e.t.Helper()
+	if e.KubernetesDistribution != distro {
+		e.t.Skipf("skipping integration test because test environment is running %q but this test wants %q", e.KubernetesDistribution, distro)
+	}
+	return e
+}
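The two LDAP test hunks earlier show the intended call pattern for the new helper: testlib.IntegrationEnv(t).WithKubeDistribution(testlib.KindDistro). For illustration only, here is a self-contained sketch of the same skip-unless-matching-distro idea using nothing but the standard testing package; the PINNIPED_TEST_K8S_DISTRO variable name and the test name are hypothetical and are not the repo's actual wiring (the real helper reads the distribution from the parsed TestEnv).

package example_test

import (
	"os"
	"testing"
)

// requireDistro mimics WithKubeDistribution: it skips the calling test unless the
// cluster type reported by the environment matches the one the test wants.
func requireDistro(t *testing.T, want string) {
	t.Helper()
	got := os.Getenv("PINNIPED_TEST_K8S_DISTRO") // hypothetical variable name
	if got != want {
		t.Skipf("skipping integration test because test environment is running %q but this test wants %q", got, want)
	}
}

// TestKindOnlyExample is a hypothetical test that, like TestLDAPSearch above,
// only needs to run on one platform to provide its coverage.
func TestKindOnlyExample(t *testing.T) {
	requireDistro(t, "Kind")
	// ... test body that only talks to the port-forwarded OpenLDAP server ...
}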