Optionally use Contour in hack/prepare-supervisor-on-kind.sh

Using Contour for ingress allows us to avoid the hacky proxy server
approach. This makes it easy to use any web browser to complete the
login process, since there is no need to configure a proxy server in
the browser.
Ryan Richard 2023-09-27 12:32:49 -07:00
parent 24069b56dc
commit 0ab6311cf5
4 changed files with 149 additions and 28 deletions

hack/kind-up.sh

@@ -5,9 +5,16 @@
set -euo pipefail
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "${ROOT}"
# To choose a specific version of kube, add this option to the command below: `--image kindest/node:v1.28.0`.
# To debug the kind config, add this option to the command below: `-v 10`
kind create cluster --config "hack/lib/kind-config/single-node.yaml" --name pinniped
if [[ "${PINNIPED_USE_CONTOUR:-}" != "" ]]; then
  echo "Adding Contour port mapping to Kind config."
  ytt -f "${ROOT}/hack/lib/kind-config/single-node.yaml" \
    -f "${ROOT}/hack/lib/kind-config/contour-overlay.yaml" >/tmp/kind-config.yaml
  kind create cluster --config /tmp/kind-config.yaml --name pinniped
else
  # To choose a specific version of kube, add this option to the command below: `--image kindest/node:v1.28.0`.
  # To debug the kind config, add this option to the command below: `-v 10`
  kind create cluster --config "hack/lib/kind-config/single-node.yaml" --name pinniped
fi
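
For a quick check that the overlay actually applied, note that the rendered config is left behind at /tmp/kind-config.yaml. A minimal sketch (the grep pattern is only illustrative):

grep -A 4 'extraPortMappings' /tmp/kind-config.yaml
# expect a TCP mapping from host 127.0.0.1:443 to container port 443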

hack/lib/kind-config/contour-overlay.yaml

@@ -0,0 +1,16 @@
#! Copyright 2023 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:overlay", "overlay")
#@overlay/match by=overlay.subset({"kind": "Cluster"}), expects=1
---
#! Appends another port mapping to every node in the Cluster config.
#! Contour will use this port mapping to expose the https endpoints
#! of in-cluster apps at localhost:443 on your host.
nodes:
#@overlay/match by=overlay.all, expects="1+"
- extraPortMappings:
  - protocol: TCP
    containerPort: 443
    hostPort: 443
    listenAddress: 127.0.0.1
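
The overlay can also be previewed without creating a cluster by rendering both files with ytt, just as the script above does. A sketch:

ytt -f hack/lib/kind-config/single-node.yaml \
  -f hack/lib/kind-config/contour-overlay.yaml
# every node in the output should now include the extraPortMappings entry above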

hack/prepare-for-integration-tests.sh

@@ -104,7 +104,7 @@ while (("$#")); do
dockerfile_path=$1
shift
;;
-d | --alternate-deploy)
--alternate-deploy)
shift
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "--alternate-deploy requires a script path to be specified"
@@ -113,7 +113,7 @@ while (("$#")); do
alternate_deploy=$1
shift
;;
-p | --alternate-deploy-supervisor)
--alternate-deploy-supervisor)
shift
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "--alternate-deploy-supervisor requires a script path to be specified"
@@ -122,7 +122,7 @@ while (("$#")); do
alternate_deploy_supervisor=$1
shift
;;
-c | --alternate-deploy-concierge)
--alternate-deploy-concierge)
shift
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "--alternate-deploy-concierge requires a script path to be specified"
@@ -131,7 +131,7 @@ while (("$#")); do
alternate_deploy_concierge=$1
shift
;;
-l | --alternate-deploy-local-user-authenticator)
--alternate-deploy-local-user-authenticator)
shift
if [[ "$#" == "0" || "$1" == -* ]]; then
log_error "--alternate-deploy-local-user-authenticator requires a script path to be specified"
@@ -165,10 +165,10 @@ if [[ "$help" == "yes" ]]; then
log_note " -g, --api-group-suffix: deploy Pinniped with an alternate API group suffix"
log_note " -s, --skip-build: reuse the most recently built image of the app instead of building"
log_note " -a, --get-active-directory-vars: specify a script that exports active directory environment variables"
log_note " -d, --alternate-deploy: specify an alternate deploy script to install each component of Pinniped (Supervisor, Concierge, local-user-authenticator)"
log_note " -p, --alternate-deploy-supervisor: specify an alternate deploy script to install Pinniped Supervisor"
log_note " -c, --alternate-deploy-concierge: specify an alternate deploy script to install Pinniped Concierge"
log_note " -l, --alternate-deploy-local-user-authenticator: specify an alternate deploy script to install Pinniped local-user-authenticator"
log_note " --alternate-deploy: specify an alternate deploy script to install all components of Pinniped"
log_note " --alternate-deploy-supervisor: specify an alternate deploy script to install Pinniped Supervisor"
log_note " --alternate-deploy-concierge: specify an alternate deploy script to install Pinniped Concierge"
log_note " --alternate-deploy-local-user-authenticator: specify an alternate deploy script to install Pinniped local-user-authenticator"
exit 1
fi
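
With the single-letter aliases removed, callers must now spell out the long flags. A sketch of the new invocation style (the custom deploy script path is hypothetical):

hack/prepare-for-integration-tests.sh \
  --alternate-deploy-supervisor ./my-supervisor-deploy.sh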

hack/prepare-supervisor-on-kind.sh

@@ -7,14 +7,26 @@
# A script to perform the setup required to manually test using the supervisor on a kind cluster.
# Assumes that you installed the apps already using hack/prepare-for-integration-tests.sh.
#
# This script is a little hacky to avoid setting up any kind of ingress or load balancer on Kind.
# It uses an http proxy server and port forwarding to route the requests into the cluster.
# By default, this script does something hacky to avoid setting up any type of ingress or load balancer
# on Kind. It uses an http proxy server and port forwarding to route the requests into the cluster.
# This is only intended for quick manual testing of features by contributors and is not a
# representation of how to really deploy or configure Pinniped.
#
# This uses the Supervisor and Concierge in the same cluster. Usually the Supervisor would be
# deployed in one cluster while each workload cluster would have a Concierge. All the workload
# cluster Concierge configurations would be similar to each other, all trusting the same Supervisor.
# When invoked with the PINNIPED_USE_CONTOUR environment variable set to a non-empty value,
# the script will install Contour into Kind and configure ingress using Contour. This requires that
# PINNIPED_USE_CONTOUR was also set to a non-empty value when you ran hack/prepare-for-integration-tests.sh.
# This will require editing your /etc/hosts file, and this script will echo instructions for doing so.
# When using PINNIPED_USE_CONTOUR, the proxy server is not needed, which makes it easier
# to use any web browser to complete web-based login flows (e.g. Safari, where configuring proxies
# on localhost is painfully difficult). One thing is still a little hacky: the script uses
# .cluster.local hostnames, because the Supervisor must be accessible by the same hostname both
# inside and outside the cluster. This is because the Concierge's JWTAuthenticator needs to reach
# the Supervisor's discovery endpoint using the same hostname that is configured in the
# Supervisor's FederationDomain.
#
# Example usage:
# PINNIPED_USE_CONTOUR=1 hack/prepare-for-integration-tests.sh -c
# PINNIPED_USE_CONTOUR=1 hack/prepare-supervisor-on-kind.sh --oidc --ldap
#
# Depends on `step` which can be installed by `brew install step` on MacOS.
#
@@ -102,6 +114,74 @@ audience="my-workload-cluster-$(openssl rand -hex 4)"
# the cluster whenever we want to be able to connect to it.
issuer_host="pinniped-supervisor-clusterip.supervisor.svc.cluster.local"
issuer="https://$issuer_host/some/path"
dex_host="dex.tools.svc.cluster.local"
if [[ "${PINNIPED_USE_CONTOUR:-}" != "" ]]; then
  # Install Contour.
  kubectl apply -f https://projectcontour.io/quickstart/contour.yaml

  # Wait for its pods to be ready.
  echo "Waiting for Contour to be ready..."
  kubectl wait --for 'jsonpath={.status.phase}=Succeeded' pods -l 'app=contour-certgen' -n projectcontour --timeout 60s
  kubectl wait --for 'jsonpath={.status.phase}=Running' pods -l 'app!=contour-certgen' -n projectcontour --timeout 60s
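
  # Sketch of an optional manual check (not part of the script): the Contour and
  # Envoy pods should all be Running at this point, e.g.
  #   kubectl get pods -n projectcontour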
  # Create an ingress for the Supervisor which uses TLS passthrough to allow the Supervisor to terminate TLS.
  cat <<EOF | kubectl apply --namespace "$PINNIPED_TEST_SUPERVISOR_NAMESPACE" -f -
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: supervisor-proxy
spec:
  virtualhost:
    fqdn: $issuer_host
    tls:
      passthrough: true
  tcpproxy:
    services:
      - name: pinniped-supervisor-clusterip
        port: 443
EOF

  # Create an ingress for Dex which uses TLS passthrough to allow Dex to terminate TLS.
  cat <<EOF | kubectl apply --namespace "$PINNIPED_TEST_TOOLS_NAMESPACE" -f -
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: dex-proxy
spec:
  virtualhost:
    fqdn: $dex_host
    tls:
      passthrough: true
  tcpproxy:
    services:
      - name: dex
        port: 443
EOF
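
  # Another optional check (sketch): Contour should report both HTTPProxies as
  # valid once it has programmed Envoy, e.g.
  #   kubectl get httpproxy -A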
  issuer_host_missing=no
  if ! grep -q "$issuer_host" /etc/hosts; then
    issuer_host_missing=yes
  fi

  dex_host_missing=no
  if ! grep -q "$dex_host" /etc/hosts; then
    dex_host_missing=yes
  fi

  if [[ "$issuer_host_missing" == "yes" || ("$dex_host_missing" == "yes" && "$use_oidc_upstream" == "yes") ]]; then
    echo
    log_error "Please run these commands to edit /etc/hosts, and then run this script again with the same options."
    if [[ "$issuer_host_missing" == "yes" ]]; then
      echo "sudo bash -c \"echo '127.0.0.1 $issuer_host' >> /etc/hosts\""
    fi
    if [[ "$dex_host_missing" == "yes" && "$use_oidc_upstream" == "yes" ]]; then
      echo "sudo bash -c \"echo '127.0.0.1 $dex_host' >> /etc/hosts\""
    fi
    log_error "When you are finished with your Kind cluster, you can remove these lines from /etc/hosts."
    exit 1
  fi
fi
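
# For reference, a sketch of the two /etc/hosts lines the block above asks for
# (hostnames as assigned earlier in this script):
#   127.0.0.1 pinniped-supervisor-clusterip.supervisor.svc.cluster.local
#   127.0.0.1 dex.tools.svc.cluster.local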
if [[ "$use_oidc_upstream" == "yes" ]]; then
# Make an OIDCIdentityProvider which uses Dex to provide identity.
@@ -348,9 +428,22 @@ kubectl apply --namespace "$PINNIPED_TEST_SUPERVISOR_NAMESPACE" -f "$fd_file"
echo "Waiting for FederationDomain to initialize or update..."
kubectl wait --for=condition=Ready FederationDomain/my-federation-domain -n "$PINNIPED_TEST_SUPERVISOR_NAMESPACE"
# Decide if we need to use the proxy settings for certain commands below.
if [[ "${PINNIPED_USE_CONTOUR:-}" != "" ]]; then
  # Using the proxy is not needed with Contour.
  proxy_server=""
  proxy_except=""
  proxy_env_vars=""
else
  # Without Contour, we will use the proxy for several commands below.
  proxy_server="$PINNIPED_TEST_PROXY"
  proxy_except="127.0.0.1"
  proxy_env_vars="https_proxy=$proxy_server no_proxy=$proxy_except "
fi
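
# Note the trailing space inside proxy_env_vars above: it lets the echoed commands
# below read naturally in both modes. A sketch of the two possible values (the
# proxy address here is hypothetical):
#   ""                                                       # Contour mode
#   "https_proxy=http://127.0.0.1:12346 no_proxy=127.0.0.1 " # proxy mode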
# Test that the federation domain is working before we proceed.
echo "Fetching FederationDomain discovery info via command: https_proxy=\"$PINNIPED_TEST_PROXY\" curl -fLsS --cacert \"$root_ca_crt_path\" \"$issuer/.well-known/openid-configuration\""
https_proxy="$PINNIPED_TEST_PROXY" curl -fLsS --cacert "$root_ca_crt_path" "$issuer/.well-known/openid-configuration" | jq .
echo "Fetching FederationDomain discovery info via command: ${proxy_env_vars}curl -fLsS --cacert \"$root_ca_crt_path\" \"$issuer/.well-known/openid-configuration\""
https_proxy="$proxy_server" no_proxy="$proxy_except" curl -fLsS --cacert "$root_ca_crt_path" "$issuer/.well-known/openid-configuration" | jq .
if [[ "$OSTYPE" == "darwin"* ]]; then
certificateAuthorityData=$(cat "$root_ca_crt_path" | base64)
@@ -378,6 +471,7 @@ echo "Waiting for JWTAuthenticator to initialize or update..."
sleep 5
# Compile the CLI.
echo "Building the Pinniped CLI..."
go build ./cmd/pinniped
# In case Pinniped was just installed moments ago, wait for the CredentialIssuer to be ready.
@@ -387,21 +481,24 @@ while [[ -z "$(kubectl get credentialissuer pinniped-concierge-config -o=jsonpat
done
# Use the CLI to get the kubeconfig. Tell it that you don't want the browser to automatically open for browser-based
# flows so we can open our own browser with the proxy settings. Generate a kubeconfig for each IDP.
# flows so we can open our own browser with the proxy settings (if needed). Generate a kubeconfig for each IDP.
flow_arg=""
if [[ -n "$use_flow" ]]; then
  flow_arg="--upstream-identity-provider-flow $use_flow"
fi
if [[ "$use_oidc_upstream" == "yes" ]]; then
https_proxy="$PINNIPED_TEST_PROXY" no_proxy="127.0.0.1" \
echo "Generating OIDC kubeconfig..."
https_proxy="$proxy_server" no_proxy="$proxy_except" \
./pinniped get kubeconfig --oidc-skip-browser $flow_arg --upstream-identity-provider-type oidc >kubeconfig-oidc.yaml
fi
if [[ "$use_ldap_upstream" == "yes" ]]; then
https_proxy="$PINNIPED_TEST_PROXY" no_proxy="127.0.0.1" \
echo "Generating LDAP kubeconfig..."
https_proxy="$proxy_server" no_proxy="$proxy_except" \
./pinniped get kubeconfig --oidc-skip-browser $flow_arg --upstream-identity-provider-type ldap >kubeconfig-ldap.yaml
fi
if [[ "$use_ad_upstream" == "yes" ]]; then
https_proxy="$PINNIPED_TEST_PROXY" no_proxy="127.0.0.1" \
echo "Generating AD kubeconfig..."
https_proxy="$proxy_server" no_proxy="$proxy_except" \
./pinniped get kubeconfig --oidc-skip-browser $flow_arg --upstream-identity-provider-type activedirectory >kubeconfig-ad.yaml
fi
@@ -412,10 +509,11 @@ rm -f "$HOME/.config/pinniped/credentials.yaml"
echo
echo "Ready! 🚀"
if [[ "$use_oidc_upstream" == "yes" || "$use_flow" == "browser_authcode" ]]; then
# These instructions only apply when you are not using Contour and you will need a browser to log in.
if [[ "${PINNIPED_USE_CONTOUR:-}" == "" && ("$use_oidc_upstream" == "yes" || "$use_flow" == "browser_authcode") ]]; then
echo
echo "To be able to access the Supervisor URL during login, start Chrome like this:"
echo " open -a \"Google Chrome\" --args --proxy-server=\"$PINNIPED_TEST_PROXY\""
echo " open -a \"Google Chrome\" --args --proxy-server=\"$proxy_server\""
echo "Note that Chrome must be fully quit before being started with --proxy-server."
echo "Then open the login URL shown below in that new Chrome window."
echo
@@ -446,16 +544,16 @@ fi
# they expire, so you should not be prompted to log in again for the rest of the day.
if [[ "$use_oidc_upstream" == "yes" ]]; then
echo "To log in using OIDC, run:"
echo "PINNIPED_DEBUG=true https_proxy=\"$PINNIPED_TEST_PROXY\" no_proxy=\"127.0.0.1\" ./pinniped whoami --kubeconfig ./kubeconfig-oidc.yaml"
echo "PINNIPED_DEBUG=true ${proxy_env_vars}./pinniped whoami --kubeconfig ./kubeconfig-oidc.yaml"
echo
fi
if [[ "$use_ldap_upstream" == "yes" ]]; then
echo "To log in using LDAP, run:"
echo "PINNIPED_DEBUG=true https_proxy=\"$PINNIPED_TEST_PROXY\" no_proxy=\"127.0.0.1\" ./pinniped whoami --kubeconfig ./kubeconfig-ldap.yaml"
echo "PINNIPED_DEBUG=true ${proxy_env_vars}./pinniped whoami --kubeconfig ./kubeconfig-ldap.yaml"
echo
fi
if [[ "$use_ad_upstream" == "yes" ]]; then
echo "To log in using AD, run:"
echo "PINNIPED_DEBUG=true https_proxy=\"$PINNIPED_TEST_PROXY\" no_proxy=\"127.0.0.1\" ./pinniped whoami --kubeconfig ./kubeconfig-ad.yaml"
echo "PINNIPED_DEBUG=true ${proxy_env_vars}./pinniped whoami --kubeconfig ./kubeconfig-ad.yaml"
echo
fi