Add integration test for the OIDC discovery endpoint

- Intended to be a red test in this commit; a future commit will
  make it go green
- Enhance env.go and prepare-for-integration-tests.sh to make it
  possible to write integration tests for the supervisor app, by
  setting more env vars and by exposing the service from the kind
  cluster to the host on a localhost port
- Add `--clean` option to prepare-for-integration-tests.sh
  to make it easier to start fresh
- Make prepare-for-integration-tests.sh advise you to run
  `go test -v -count 1 ./test/integration`, because testing a single
  package streams the output instead of buffering it per package
  (see the workflow sketch after this list)
- Make concierge_api_discovery_test.go pass by adding expectations
  for the new OIDCProviderConfig type
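
As orientation for the pieces below, here is the intended end-to-end workflow. This is a sketch, not verified output: the script's location under hack/ is an assumption (only hack/lib/ paths appear in this diff), while the env file path and the go test invocation come from the script itself.

    # Assumption: the script lives under hack/, next to the hack/lib/ files below.
    ./hack/prepare-for-integration-tests.sh --clean

    # Source the env vars the script writes, then run the integration tests.
    # Testing the single ./test/integration package streams -v output as it runs,
    # and -count 1 also bypasses Go's test result cache.
    source /tmp/integration-test-env && go test -v -count 1 ./test/integration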
Ryan Richard 2020-10-06 17:53:29 -07:00
parent 78cc49d658
commit ae56fcb46a
6 changed files with 170 additions and 22 deletions

Multi-node kind cluster config

@@ -0,0 +1,8 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
  extraPortMappings: [{containerPort: 31234, hostPort: 12345, protocol: TCP}]
- role: worker
  extraPortMappings: [{containerPort: 31234, hostPort: 12345, protocol: TCP}]

hack/lib/kind-config/single-node.yaml

@@ -0,0 +1,5 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings: [{containerPort: 31234, hostPort: 12345, protocol: TCP}]
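
The extraPortMappings entries above publish containerPort 31234 on a node container (where the NodePort service added below will listen) as hostPort 12345 on the developer's machine. A quick way to sanity-check the mapping after the cluster comes up; note that the container name kind-control-plane is kind's default for the single-node cluster named "kind" and is an assumption here:

    # Assumption: "kind-control-plane" is the default node container name
    # for the single-node config. Expect a mapping like:
    #   31234/tcp -> 0.0.0.0:12345
    docker port kind-control-plane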

prepare-for-integration-tests.sh

@@ -42,6 +42,7 @@ function check_dependency() {
 #

 help=no
 skip_build=no
+clean_kind=no
 while (("$#")); do
   case "$1" in
@@ -53,6 +54,10 @@
     skip_build=yes
     shift
     ;;
+  -c | --clean)
+    clean_kind=yes
+    shift
+    ;;
   -*)
     log_error "Unsupported flag $1" >&2
     exit 1
@@ -90,17 +95,23 @@ check_dependency htpasswd "Please install htpasswd. Should be pre-installed on M
 # Require kubectl >= 1.18.x
 if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
-  echo "kubectl >= 1.18.x is required, you have $(kubectl version --client=true --short | cut -d ':' -f2)"
+  log_error "kubectl >= 1.18.x is required, you have $(kubectl version --client=true --short | cut -d ':' -f2)"
   exit 1
 fi

+if [[ "$clean_kind" == "yes" ]]; then
+  log_note "Deleting running kind clusters to prepare from a clean slate..."
+  kind delete cluster
+fi
+
 #
 # Setup kind and build the app
 #
 log_note "Checking for running kind clusters..."
 if ! kind get clusters | grep -q -e '^kind$'; then
   log_note "Creating a kind cluster..."
-  kind create cluster
+  # single-node.yaml exposes node port 31234 as localhost:12345
+  kind create cluster --config "$pinniped_path/hack/lib/kind-config/single-node.yaml"
 else
   if ! kubectl cluster-info | grep master | grep -q 127.0.0.1; then
     log_error "Seems like your kubeconfig is not targeting a local cluster."
@@ -177,15 +188,37 @@ kubectl create secret generic "$test_username" \
 #
 # Deploy the Pinniped Supervisor
 #
+supervisor_app_name="pinniped-supervisor"
+supervisor_namespace="pinniped-supervisor"
 pushd deploy-supervisor >/dev/null

 log_note "Deploying the Pinniped Supervisor app to the cluster..."
 ytt --file . \
+  --data-value "app_name=$supervisor_app_name" \
+  --data-value "namespace=$supervisor_namespace" \
   --data-value "image_repo=$registry_repo" \
   --data-value "image_tag=$tag" >"$manifest"
 kapp deploy --yes --app "pinniped-supervisor" --diff-changes --file "$manifest"

+log_note "Adding NodePort service to expose the Pinniped Supervisor app on the kind node..."
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Service
+metadata:
+  name: ${supervisor_app_name}-node-port
+  namespace: $supervisor_namespace
+spec:
+  type: NodePort
+  selector:
+    app: $supervisor_app_name
+  ports:
+    - port: 80
+      targetPort: 80
+      nodePort: 31234
+EOF
+
 popd >/dev/null

 #
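
With the Service above applied, the full path for the tests is: localhost:12345 on the host, to containerPort 31234 on the kind node (via extraPortMappings), to NodePort 31234, to port 80 on the supervisor pods. A hand-run smoke test from the host, mirroring what the integration test below automates:

    # While no OIDCProviderConfig objects exist, the integration test below
    # expects this default discovery path to return a 404:
    curl -i http://localhost:12345/.well-known/openid-configuration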
@@ -226,6 +259,9 @@ export PINNIPED_TEST_USER_GROUPS=${test_groups}
 export PINNIPED_TEST_USER_TOKEN=${test_username}:${test_password}
 export PINNIPED_TEST_WEBHOOK_ENDPOINT=${webhook_url}
 export PINNIPED_TEST_WEBHOOK_CA_BUNDLE=${webhook_ca_bundle}
+export PINNIPED_SUPERVISOR_NAMESPACE=${supervisor_namespace}
+export PINNIPED_SUPERVISOR_APP_NAME=${supervisor_app_name}
+export PINNIPED_TEST_SUPERVISOR_ADDRESS="localhost:12345"

 read -r -d '' PINNIPED_CLUSTER_CAPABILITY_YAML << PINNIPED_CLUSTER_CAPABILITY_YAML_EOF || true
 ${pinniped_cluster_capability_file_content}
@@ -242,7 +278,7 @@ goland_vars=$(grep -v '^#' /tmp/integration-test-env | grep -E '^export .+=' | s
 log_note
 log_note "🚀 Ready to run integration tests! For example..."
 log_note "    cd $pinniped_path"
-log_note '    source /tmp/integration-test-env && go test -v -count 1 ./test/...'
+log_note '    source /tmp/integration-test-env && go test -v -count 1 ./test/integration'
 log_note
 log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:'
 log_note "    ${goland_vars}PINNIPED_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}"

concierge_api_discovery_test.go

@@ -78,6 +78,14 @@ func TestGetAPIResourceList(t *testing.T) {
 					Verbs:        []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"},
 					ShortNames:   []string{"cic"},
 				},
+				{
+					Name:         "oidcproviderconfigs",
+					SingularName: "oidcproviderconfig",
+					Namespaced:   true,
+					Kind:         "OIDCProviderConfig",
+					Verbs:        []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"},
+					ShortNames:   []string{"opc"},
+				},
 			},
 		},
 	},
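
Once a build that registers the new type is deployed, the resource and its opc short name should be visible through ordinary API discovery. A hedged spot check from the command line (resource names are taken from the expectation above; the exact API group string is not shown in this diff):

    # Expect a row for oidcproviderconfigs with short name "opc":
    kubectl api-resources | grep -i oidcproviderconfig

    # The short name should resolve like any other resource name:
    kubectl get opc --all-namespaces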

Supervisor OIDC discovery integration test (in test/integration)

@@ -5,33 +5,118 @@ package integration

 import (
 	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
 	"testing"
 	"time"

+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"go.pinniped.dev/test/library"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"go.pinniped.dev/generated/1.19/apis/config/v1alpha1"
+	"go.pinniped.dev/internal/here"
+	"go.pinniped.dev/test/library"
 )

 func TestSupervisorOIDCDiscovery(t *testing.T) {
 	env := library.IntegrationEnv(t)
 	client := library.NewPinnipedClientset(t)
+	httpClient := &http.Client{}
+	ns := env.SupervisorNamespace

-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
 	defer cancel()

-	_, err := client.
-		ConfigV1alpha1().
-		OIDCProviderConfigs(env.Namespace).
-		List(ctx, metav1.ListOptions{})
+	// Temporarily remove any existing OIDCProviderConfigs from the cluster so we can test from a clean slate.
+	originalConfigList, err := client.ConfigV1alpha1().OIDCProviderConfigs(ns).List(ctx, metav1.ListOptions{})
 	require.NoError(t, err)
+	for _, config := range originalConfigList.Items {
+		err := client.ConfigV1alpha1().OIDCProviderConfigs(ns).Delete(ctx, config.Name, metav1.DeleteOptions{})
+		require.NoError(t, err)
+	}

-	// 0. Create CRD with single issuer field in config group and generate code.
-	// 1. Add test hook that restores these CRDs at the end of the test.
-	// 2. Get all CRDs and save them in an array somewhere; also delete them after we store them.
-	// 3. Test behavior of when we have no CRD - make sure we get the status code that we want back
-	//    from the discovery endpoint?
-	// 4. Add a CRD with a known issuer.
-	// 5. Test behavior of when we have a CRD - make sure we get the status code and response body
-	//    that we want back from the discovery endpoint?
+	// When this test has finished, recreate any OIDCProviderConfigs that had existed on the cluster before this test.
+	t.Cleanup(func() {
+		for _, config := range originalConfigList.Items {
+			thisConfig := config
+			_, err := client.ConfigV1alpha1().OIDCProviderConfigs(ns).Create(ctx, &thisConfig, metav1.CreateOptions{})
+			require.NoError(t, err)
+		}
+	})
+
+	// Test that there is no default discovery endpoint available when there are no OIDCProviderConfigs.
+	requestNonExistentPath, err := http.NewRequestWithContext(
+		ctx,
+		http.MethodGet,
+		fmt.Sprintf("http://%s/.well-known/openid-configuration", env.SupervisorAddress),
+		nil,
+	)
+	require.NoError(t, err)
+	notFoundResponse, err := httpClient.Do(requestNonExistentPath)
+	require.NoError(t, err)
+	require.Equal(t, 404, notFoundResponse.StatusCode)
+	err = notFoundResponse.Body.Close()
+	require.NoError(t, err)
+
+	// Create a new OIDCProviderConfig with a known issuer.
+	issuer := fmt.Sprintf("http://%s/nested/issuer", env.SupervisorAddress)
+	newOIDCProviderConfig := v1alpha1.OIDCProviderConfig{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "nested-issuer-config-from-integration-test",
+			Namespace: ns,
+		},
+		Spec: v1alpha1.OIDCProviderConfigSpec{
+			Issuer: issuer,
+		},
+	}
+	_, err = client.ConfigV1alpha1().OIDCProviderConfigs(ns).Create(ctx, &newOIDCProviderConfig, metav1.CreateOptions{})
+	require.NoError(t, err)
+
+	// When this test has finished, clean up the new OIDCProviderConfig.
+	t.Cleanup(func() {
+		err := client.ConfigV1alpha1().OIDCProviderConfigs(ns).Delete(ctx, newOIDCProviderConfig.Name, metav1.DeleteOptions{})
+		require.NoError(t, err)
+	})
+
+	// Define a request to the new discovery endpoint which should have been created for the above OIDCProviderConfig.
+	requestDiscoveryEndpoint, err := http.NewRequestWithContext(
+		ctx,
+		http.MethodGet,
+		fmt.Sprintf("http://%s/nested/issuer/.well-known/openid-configuration", env.SupervisorAddress),
+		nil,
+	)
+	require.NoError(t, err)
+
+	// Fetch that discovery endpoint. Give the endpoint some time to come into existence.
+	var response *http.Response
+	assert.Eventually(t, func() bool {
+		response, err = httpClient.Do(requestDiscoveryEndpoint) //nolint:bodyclose // the body is closed below after it is read
+		return err == nil
+	}, 10*time.Second, 200*time.Millisecond)
+	require.NoError(t, err)
+
+	responseBody, err := ioutil.ReadAll(response.Body)
+	require.NoError(t, err)
+	err = response.Body.Close()
+	require.NoError(t, err)
+
+	// Check that the response matches our expectations.
+	expectedResultTemplate := here.Doc(`{
+		"issuer": "%s",
+		"authorization_endpoint": "%s/connect/authorize",
+		"token_endpoint": "%s/connect/token",
+		"token_endpoint_auth_methods_supported": ["client_secret_basic"],
+		"token_endpoint_auth_signing_alg_values_supported": ["RS256"],
+		"jwks_uri": "%s/jwks.json",
+		"scopes_supported": ["openid", "offline"],
+		"response_types_supported": ["code"],
+		"claims_supported": ["groups"]
+	}`)
+	expectedJSON := fmt.Sprintf(expectedResultTemplate, issuer, issuer, issuer, issuer)
+
+	require.Equal(t, 200, response.StatusCode)
+	require.Equal(t, "application/json", response.Header.Get("content-type"))
+	require.JSONEq(t, expectedJSON, string(responseBody))
 }
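
Because this test is intended to land red, it is convenient to run on its own while the server side is built out; the -run pattern below simply names the test function above:

    # Run only the (currently red) supervisor discovery test, streaming output:
    source /tmp/integration-test-env && \
      go test -v -count 1 -run TestSupervisorOIDCDiscovery ./test/integration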

env.go

@@ -25,11 +25,14 @@ const (
 type TestEnv struct {
 	t *testing.T

 	Namespace           string                                  `json:"namespace"`
+	SupervisorNamespace string                                  `json:"supervisorNamespace"`
 	AppName             string                                  `json:"appName"`
+	SupervisorAppName   string                                  `json:"supervisorAppName"`
 	Capabilities        map[TestClusterCapability]bool          `json:"capabilities"`
 	TestWebhook         idpv1alpha1.WebhookIdentityProviderSpec `json:"testWebhook"`
+	SupervisorAddress   string                                  `json:"supervisorAddress"`
 	TestUser            struct {
 		Token            string   `json:"token"`
 		ExpectedUsername string   `json:"expectedUsername"`
 		ExpectedGroups   []string `json:"expectedGroups"`
@@ -71,6 +74,9 @@ func IntegrationEnv(t *testing.T) *TestEnv {
 	result.TestUser.ExpectedGroups = strings.Split(strings.ReplaceAll(needEnv("PINNIPED_TEST_USER_GROUPS"), " ", ""), ",")
 	result.TestUser.Token = needEnv("PINNIPED_TEST_USER_TOKEN")
 	result.TestWebhook.Endpoint = needEnv("PINNIPED_TEST_WEBHOOK_ENDPOINT")
+	result.SupervisorNamespace = needEnv("PINNIPED_SUPERVISOR_NAMESPACE")
+	result.SupervisorAppName = needEnv("PINNIPED_SUPERVISOR_APP_NAME")
+	result.SupervisorAddress = needEnv("PINNIPED_TEST_SUPERVISOR_ADDRESS")
 	result.TestWebhook.TLS = &idpv1alpha1.TLSSpec{CertificateAuthorityData: needEnv("PINNIPED_TEST_WEBHOOK_CA_BUNDLE")}
 	result.t = t
 	return &result
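
To point tests at an already-prepared cluster without re-running the whole script, the three new variables can also be exported by hand; these values mirror what prepare-for-integration-tests.sh exports above:

    export PINNIPED_SUPERVISOR_NAMESPACE=pinniped-supervisor
    export PINNIPED_SUPERVISOR_APP_NAME=pinniped-supervisor
    export PINNIPED_TEST_SUPERVISOR_ADDRESS=localhost:12345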