Add integration test for the OIDC discovery endpoint
- Intended to be a red test in this commit; will make it go green in a future commit
- Enhance env.go and prepare-for-integration-tests.sh to make it possible to write integration tests for the supervisor app by setting more env vars and by exposing the service to the kind host on a localhost port
- Add `--clean` option to prepare-for-integration-tests.sh to make it easier to start fresh
- Make prepare-for-integration-tests.sh advise you to run `go test -v -count 1 ./test/integration` because this does not buffer the test output
- Make concierge_api_discovery_test.go pass by adding expectations for the new OIDCProviderConfig type
commit ae56fcb46a (parent 78cc49d658)
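For orientation, the workflow this commit enables looks roughly like this (a sketch: it assumes the script sits at hack/prepare-for-integration-tests.sh in the repo root, consistent with the hack/lib/kind-config paths added below; the env file path and test command come from the script's own advice):

    # Delete any existing kind cluster and rebuild everything from a clean slate.
    ./hack/prepare-for-integration-tests.sh --clean

    # Source the env vars the script writes, then run the integration tests without buffered output.
    source /tmp/integration-test-env && go test -v -count 1 ./test/integration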
hack/lib/kind-config/multi-node.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+- role: worker
+  extraPortMappings: [{containerPort: 31234, hostPort: 12345, protocol: TCP}]
+- role: worker
+  extraPortMappings: [{containerPort: 31234, hostPort: 12345, protocol: TCP}]
hack/lib/kind-config/single-node.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  extraPortMappings: [{containerPort: 31234, hostPort: 12345, protocol: TCP}]
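Only single-node.yaml is wired into the script below; multi-node.yaml appears to be provided for manual use. Creating a cluster from either config is a one-liner (a sketch, assuming you run it from the repo root):

    # Single-node cluster, as the script below now does automatically:
    kind create cluster --config hack/lib/kind-config/single-node.yaml

    # Or a cluster with two workers, for manual multi-node testing:
    kind create cluster --config hack/lib/kind-config/multi-node.yaml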
hack/prepare-for-integration-tests.sh
@@ -42,6 +42,7 @@ function check_dependency() {
 #
 help=no
 skip_build=no
+clean_kind=no
 
 while (("$#")); do
   case "$1" in
@@ -53,6 +54,10 @@ while (("$#")); do
       skip_build=yes
       shift
       ;;
+    -c | --clean)
+      clean_kind=yes
+      shift
+      ;;
     -*)
       log_error "Unsupported flag $1" >&2
       exit 1
@@ -90,17 +95,23 @@ check_dependency htpasswd "Please install htpasswd. Should be pre-installed on M
 
 # Require kubectl >= 1.18.x
 if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
-  echo "kubectl >= 1.18.x is required, you have $(kubectl version --client=true --short | cut -d ':' -f2)"
+  log_error "kubectl >= 1.18.x is required, you have $(kubectl version --client=true --short | cut -d ':' -f2)"
   exit 1
 fi
 
+if [[ "$clean_kind" == "yes" ]]; then
+  log_note "Deleting running kind clusters to prepare from a clean slate..."
+  kind delete cluster
+fi
+
 #
 # Setup kind and build the app
 #
 log_note "Checking for running kind clusters..."
 if ! kind get clusters | grep -q -e '^kind$'; then
   log_note "Creating a kind cluster..."
-  kind create cluster
+  # single-node.yaml exposes node port 31234 as localhost:12345
+  kind create cluster --config "$pinniped_path/hack/lib/kind-config/single-node.yaml"
 else
   if ! kubectl cluster-info | grep master | grep -q 127.0.0.1; then
     log_error "Seems like your kubeconfig is not targeting a local cluster."
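To confirm the port mapping took effect once the cluster is up, recall that the kind node is a Docker container, so its published ports are directly visible (a sketch; kind-control-plane is kind's default container name for a single-node cluster):

    docker port kind-control-plane
    # Expect output along the lines of: 31234/tcp -> 0.0.0.0:12345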
@@ -177,15 +188,37 @@ kubectl create secret generic "$test_username" \
 #
 # Deploy the Pinniped Supervisor
 #
+supervisor_app_name="pinniped-supervisor"
+supervisor_namespace="pinniped-supervisor"
+
 pushd deploy-supervisor >/dev/null
 
 log_note "Deploying the Pinniped Supervisor app to the cluster..."
 ytt --file . \
+  --data-value "app_name=$supervisor_app_name" \
+  --data-value "namespace=$supervisor_namespace" \
   --data-value "image_repo=$registry_repo" \
   --data-value "image_tag=$tag" >"$manifest"
 
 kapp deploy --yes --app "pinniped-supervisor" --diff-changes --file "$manifest"
 
+log_note "Adding NodePort service to expose the Pinniped Supervisor app on the kind node..."
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Service
+metadata:
+  name: ${supervisor_app_name}-node-port
+  namespace: $supervisor_namespace
+spec:
+  type: NodePort
+  selector:
+    app: $supervisor_app_name
+  ports:
+    - port: 80
+      targetPort: 80
+      nodePort: 31234
+EOF
+
 popd >/dev/null
 
 #
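Tracing how the supervisor becomes reachable from the host: the Service above forwards port 80 to the app's pods and is exposed on node port 31234, which the kind configs map to host port 12345. A quick manual probe might look like this (a sketch; per the new test below, the bare well-known path should 404 until an OIDCProviderConfig exists):

    curl -i http://localhost:12345/.well-known/openid-configuration
    # HTTP/1.1 404 Not Found  (expected while no OIDCProviderConfig is defined)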
@@ -226,6 +259,9 @@ export PINNIPED_TEST_USER_GROUPS=${test_groups}
 export PINNIPED_TEST_USER_TOKEN=${test_username}:${test_password}
 export PINNIPED_TEST_WEBHOOK_ENDPOINT=${webhook_url}
 export PINNIPED_TEST_WEBHOOK_CA_BUNDLE=${webhook_ca_bundle}
+export PINNIPED_SUPERVISOR_NAMESPACE=${supervisor_namespace}
+export PINNIPED_SUPERVISOR_APP_NAME=${supervisor_app_name}
+export PINNIPED_TEST_SUPERVISOR_ADDRESS="localhost:12345"
 
 read -r -d '' PINNIPED_CLUSTER_CAPABILITY_YAML << PINNIPED_CLUSTER_CAPABILITY_YAML_EOF || true
 ${pinniped_cluster_capability_file_content}
@@ -242,7 +278,7 @@ goland_vars=$(grep -v '^#' /tmp/integration-test-env | grep -E '^export .+=' | s
 log_note
 log_note "🚀 Ready to run integration tests! For example..."
 log_note "    cd $pinniped_path"
-log_note '    source /tmp/integration-test-env && go test -v -count 1 ./test/...'
+log_note '    source /tmp/integration-test-env && go test -v -count 1 ./test/integration'
 log_note
 log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:'
 log_note "    ${goland_vars}PINNIPED_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}"
test/integration/concierge_api_discovery_test.go
@@ -78,6 +78,14 @@ func TestGetAPIResourceList(t *testing.T) {
 				Verbs:        []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"},
 				ShortNames:   []string{"cic"},
 			},
+			{
+				Name:         "oidcproviderconfigs",
+				SingularName: "oidcproviderconfig",
+				Namespaced:   true,
+				Kind:         "OIDCProviderConfig",
+				Verbs:        []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"},
+				ShortNames:   []string{"opc"},
+			},
 		},
 	},
 },
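Since the new API resource is namespaced and registers the short name opc, it should be inspectable with kubectl once the supervisor types are being served (a sketch; the namespace is whatever PINNIPED_SUPERVISOR_NAMESPACE points at, pinniped-supervisor in the script above):

    kubectl get oidcproviderconfigs --namespace pinniped-supervisor
    kubectl get opc --namespace pinniped-supervisor   # same query, via the short name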
@@ -5,33 +5,118 @@ package integration
 
 import (
     "context"
+    "fmt"
+    "io/ioutil"
+    "net/http"
     "testing"
     "time"
 
+    "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    "go.pinniped.dev/test/library"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+    "go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
+    "go.pinniped.dev/internal/here"
+    "go.pinniped.dev/test/library"
 )
 
 func TestSupervisorOIDCDiscovery(t *testing.T) {
     env := library.IntegrationEnv(t)
     client := library.NewPinnipedClientset(t)
 
-    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+    httpClient := &http.Client{}
+    ns := env.SupervisorNamespace
+    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
     defer cancel()
 
-    _, err := client.
-        ConfigV1alpha1().
-        OIDCProviderConfigs(env.Namespace).
-        List(ctx, metav1.ListOptions{})
+    // Temporarily remove any existing OIDCProviderConfigs from the cluster so we can test from a clean slate.
+    originalConfigList, err := client.ConfigV1alpha1().OIDCProviderConfigs(ns).List(ctx, metav1.ListOptions{})
     require.NoError(t, err)
 
-    // 0. Create CRD with single issuer field in config group and generate code.
-    // 1. Add test hook that restores these CRDs at the end of the test.
-    // 2. Get all CRDs and save them in an array somewhere; also delete them after we store them.
-    // 3. Test behavior of when we have no CRD - make sure we get the status code that we want back
-    //    from the discovery endpoint?
-    // 4. Add a CRD with a known issuer.
-    // 5. Test behavior of when we have a CRD - make sure we get the status code and response body
-    //    that we want back from the discovery endpoint?
+    for _, config := range originalConfigList.Items {
+        err := client.ConfigV1alpha1().OIDCProviderConfigs(ns).Delete(ctx, config.Name, metav1.DeleteOptions{})
+        require.NoError(t, err)
+    }
+
+    // When this test has finished, recreate any OIDCProviderConfigs that had existed on the cluster before this test.
+    t.Cleanup(func() {
+        for _, config := range originalConfigList.Items {
+            thisConfig := config
+            _, err := client.ConfigV1alpha1().OIDCProviderConfigs(ns).Create(ctx, &thisConfig, metav1.CreateOptions{})
+            require.NoError(t, err)
+        }
+    })
+
+    // Test that there is no default discovery endpoint available when there are no OIDCProviderConfigs.
+    requestNonExistentPath, err := http.NewRequestWithContext(
+        ctx,
+        http.MethodGet,
+        fmt.Sprintf("http://%s/.well-known/openid-configuration", env.SupervisorAddress),
+        nil,
+    )
+    require.NoError(t, err)
+    notFoundResponse, err := httpClient.Do(requestNonExistentPath)
+    require.NoError(t, err)
+    require.Equal(t, 404, notFoundResponse.StatusCode)
+    err = notFoundResponse.Body.Close()
+    require.NoError(t, err)
+
+    // Create a new OIDCProviderConfig with a known issuer.
+    issuer := fmt.Sprintf("http://%s/nested/issuer", env.SupervisorAddress)
+    newOIDCProviderConfig := v1alpha1.OIDCProviderConfig{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "nested-issuer-config-from-integration-test",
+            Namespace: ns,
+        },
+        Spec: v1alpha1.OIDCProviderConfigSpec{
+            Issuer: issuer,
+        },
+    }
+    _, err = client.ConfigV1alpha1().OIDCProviderConfigs(ns).Create(ctx, &newOIDCProviderConfig, metav1.CreateOptions{})
+    require.NoError(t, err)
+
+    // When this test has finished, clean up the new OIDCProviderConfig.
+    t.Cleanup(func() {
+        err = client.ConfigV1alpha1().OIDCProviderConfigs(ns).Delete(ctx, newOIDCProviderConfig.Name, metav1.DeleteOptions{})
+        require.NoError(t, err)
+    })
+
+    // Define a request to the new discovery endpoint which should have been created for the above OIDCProviderConfig.
+    requestDiscoveryEndpoint, err := http.NewRequestWithContext(
+        ctx,
+        http.MethodGet,
+        fmt.Sprintf("http://%s/nested/issuer/.well-known/openid-configuration", env.SupervisorAddress),
+        nil,
+    )
+    require.NoError(t, err)
+
+    // Fetch that discovery endpoint. Give it some time for the endpoint to come into existence.
+    var response *http.Response
+    assert.Eventually(t, func() bool {
+        response, err = httpClient.Do(requestDiscoveryEndpoint) //nolint:bodyclose // the body is closed below after it is read
+        return err == nil
+    }, 10*time.Second, 200*time.Millisecond)
+    require.NoError(t, err)
+    responseBody, err := ioutil.ReadAll(response.Body)
+    require.NoError(t, err)
+    err = response.Body.Close()
+    require.NoError(t, err)
+
+    // Check that the response matches our expectations.
+    expectedResultTemplate := here.Doc(`{
+      "issuer": "%s",
+      "authorization_endpoint": "%s/connect/authorize",
+      "token_endpoint": "%s/connect/token",
+      "token_endpoint_auth_methods_supported": ["client_secret_basic"],
+      "token_endpoint_auth_signing_alg_values_supported": ["RS256"],
+      "jwks_uri": "%s/jwks.json",
+      "scopes_supported": ["openid", "offline"],
+      "response_types_supported": ["code"],
+      "claims_supported": ["groups"]
+    }`)
+    expectedJSON := fmt.Sprintf(expectedResultTemplate, issuer, issuer, issuer, issuer)
+
+    require.Equal(t, 200, response.StatusCode)
+    require.Equal(t, "application/json", response.Header.Get("content-type"))
+    require.JSONEq(t, expectedJSON, string(responseBody))
 }
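The flow the test automates can also be driven by hand against the same endpoints (a sketch: the apiVersion group config.pinniped.dev is an assumption inferred from the generated .../apis/config/v1alpha1 package path, and the resource name is illustrative):

    # Create an OIDCProviderConfig whose issuer points at the supervisor's host mapping.
    cat <<EOF | kubectl apply -f -
    apiVersion: config.pinniped.dev/v1alpha1   # assumed group, inferred from the generated package path
    kind: OIDCProviderConfig
    metadata:
      name: manual-test-config   # illustrative name
      namespace: pinniped-supervisor
    spec:
      issuer: http://localhost:12345/nested/issuer
    EOF

    # Then fetch the discovery document the supervisor should serve for that issuer.
    curl http://localhost:12345/nested/issuer/.well-known/openid-configuration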
test/library/env.go
@@ -25,11 +25,14 @@ const (
 type TestEnv struct {
     t *testing.T
 
-    Namespace    string                                  `json:"namespace"`
-    AppName      string                                  `json:"appName"`
-    Capabilities map[TestClusterCapability]bool          `json:"capabilities"`
-    TestWebhook  idpv1alpha1.WebhookIdentityProviderSpec `json:"testWebhook"`
-    TestUser     struct {
+    Namespace           string                                  `json:"namespace"`
+    SupervisorNamespace string                                  `json:"supervisorNamespace"`
+    AppName             string                                  `json:"appName"`
+    SupervisorAppName   string                                  `json:"supervisorAppName"`
+    Capabilities        map[TestClusterCapability]bool          `json:"capabilities"`
+    TestWebhook         idpv1alpha1.WebhookIdentityProviderSpec `json:"testWebhook"`
+    SupervisorAddress   string                                  `json:"supervisorAddress"`
+    TestUser            struct {
         Token            string   `json:"token"`
         ExpectedUsername string   `json:"expectedUsername"`
         ExpectedGroups   []string `json:"expectedGroups"`
@@ -71,6 +74,9 @@ func IntegrationEnv(t *testing.T) *TestEnv {
     result.TestUser.ExpectedGroups = strings.Split(strings.ReplaceAll(needEnv("PINNIPED_TEST_USER_GROUPS"), " ", ""), ",")
     result.TestUser.Token = needEnv("PINNIPED_TEST_USER_TOKEN")
     result.TestWebhook.Endpoint = needEnv("PINNIPED_TEST_WEBHOOK_ENDPOINT")
+    result.SupervisorNamespace = needEnv("PINNIPED_SUPERVISOR_NAMESPACE")
+    result.SupervisorAppName = needEnv("PINNIPED_SUPERVISOR_APP_NAME")
+    result.SupervisorAddress = needEnv("PINNIPED_TEST_SUPERVISOR_ADDRESS")
     result.TestWebhook.TLS = &idpv1alpha1.TLSSpec{CertificateAuthorityData: needEnv("PINNIPED_TEST_WEBHOOK_CA_BUNDLE")}
     result.t = t
     return &result