#!/usr/bin/env bash

# Copyright 2021-2023 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# A script to perform the setup required to manually test using the supervisor on a kind cluster.
# Assumes that you installed the apps already using hack/prepare-for-integration-tests.sh.
#
# By default, this script does something hacky to avoid setting up any type of ingress or load balancer
# on Kind. It uses an http proxy server and port forwarding to route the requests into the cluster.
# This is only intended for quick manual testing of features by contributors and is not a
# representation of how to really deploy or configure Pinniped.
#
# When invoked with the PINNIPED_USE_CONTOUR environment variable set to a non-empty value,
# the script will install Contour into Kind and configure ingress using Contour. This requires that you
# also set PINNIPED_USE_CONTOUR to a non-empty value when you ran hack/prepare-for-integration-tests.sh.
# This will require editing your /etc/hosts file, and this script will echo instructions for doing so.
# When using PINNIPED_USE_CONTOUR, the proxy server is not needed, which makes it easier
# to use any web browser to complete web-based login flows (e.g. Safari, where configuring proxies
# on localhost is painfully difficult). This still does something a little hacky though, which is that
# it uses .cluster.local hostnames, because the Supervisor must be accessible by the same hostname both
# inside and outside the cluster, since the Concierge's JWTAuthenticator needs to be able to
# reach the discovery endpoint of the Supervisor by using the same hostname that is configured in the
# Supervisor's FederationDomain.
#
# Example usage:
#   PINNIPED_USE_CONTOUR=1 hack/prepare-for-integration-tests.sh -c
#   PINNIPED_USE_CONTOUR=1 hack/prepare-supervisor-on-kind.sh --oidc --ldap
#
# Depends on `step` which can be installed by `brew install step` on MacOS.
# set -euo pipefail # Change working directory to the top of the repo. ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" cd "$ROOT" function log_error() { RED='\033[0;31m' NC='\033[0m' if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then echo -e "🙁${RED} Error: $* ${NC}" else echo ":( Error: $*" fi } use_oidc_upstream=no use_ldap_upstream=no use_ad_upstream=no use_flow="" while (("$#")); do case "$1" in --flow) shift # If there are no more command line arguments, or there is another command line argument but it starts with a dash, then error if [[ "$#" == "0" || "$1" == -* ]]; then log_error "--flow requires a flow name to be specified (e.g. cli_password or browser_authcode" exit 1 fi if [[ "$1" != "browser_authcode" && "$1" != "cli_password" ]]; then log_error "--flow must be cli_password or browser_authcode" exit 1 fi use_flow=$1 shift ;; --ldap) use_ldap_upstream=yes shift ;; --oidc) use_oidc_upstream=yes shift ;; --ad) # Use an ActiveDirectoryIdentityProvider. # This assumes that you used the --get-active-directory-vars flag with hack/prepare-for-integration-tests.sh. use_ad_upstream=yes shift ;; -*) log_error "Unsupported flag $1" >&2 exit 1 ;; *) log_error "Unsupported positional arg $1" >&2 exit 1 ;; esac done if [[ "$use_oidc_upstream" == "no" && "$use_ldap_upstream" == "no" && "$use_ad_upstream" == "no" ]]; then log_error "Error: Please use --oidc, --ldap, or --ad to specify which type(s) of upstream identity provider(s) you would like. May use one or multiple." exit 1 fi # Read the env vars output by hack/prepare-for-integration-tests.sh source /tmp/integration-test-env # Choose some filenames. root_ca_crt_path=root_ca.crt root_ca_key_path=root_ca.key tls_crt_path=tls.crt tls_key_path=tls.key # Choose an audience name for the Concierge. audience="my-workload-cluster-$(openssl rand -hex 4)" # These settings align with how the Dex redirect URI is configured by hack/prepare-for-integration-tests.sh. 
# Note that this hostname can only be resolved inside the cluster, so we will use a web proxy running inside # the cluster whenever we want to be able to connect to it. issuer_host="pinniped-supervisor-clusterip.supervisor.svc.cluster.local" issuer="https://$issuer_host/some/path" dex_host="dex.tools.svc.cluster.local" if [[ "${PINNIPED_USE_CONTOUR:-}" != "" ]]; then # Install Contour. kubectl apply -f https://projectcontour.io/quickstart/contour.yaml # Wait for its pods to be ready. echo "Waiting for Contour to be ready..." kubectl wait --for 'jsonpath={.status.phase}=Succeeded' pods -l 'app=contour-certgen' -n projectcontour --timeout 60s kubectl wait --for 'jsonpath={.status.phase}=Running' pods -l 'app!=contour-certgen' -n projectcontour --timeout 60s # Create an ingress for the Supervisor which uses TLS passthrough to allow the Supervisor to terminate TLS. cat <> /etc/hosts\"" fi if [[ "$dex_host_missing" == "yes" && "$use_oidc_upstream" == "yes" ]]; then echo "sudo bash -c \"echo '127.0.0.1 $dex_host' >> /etc/hosts\"" fi log_error "When you are finished with your Kind cluster, you can remove these lines from /etc/hosts." exit 1 fi fi if [[ "$use_oidc_upstream" == "yes" ]]; then # Make an OIDCIdentityProvider which uses Dex to provide identity. cat < $fd_file apiVersion: config.supervisor.pinniped.dev/v1alpha1 kind: FederationDomain metadata: name: my-federation-domain spec: issuer: $issuer tls: secretName: my-federation-domain-tls identityProviders: EOF if [[ "$use_oidc_upstream" == "yes" ]]; then # Indenting the heredoc by 4 spaces to make it indented the correct amount in the FederationDomain below. 
cat << EOF >> $fd_file - displayName: "My OIDC IDP 🚀" objectRef: apiGroup: idp.supervisor.pinniped.dev kind: OIDCIdentityProvider name: my-oidc-provider transforms: expressions: - type: username/v1 expression: '"oidc:" + username' - type: groups/v1 # the pinny user doesn't belong to any groups in Dex, so this isn't strictly needed, but doesn't hurt expression: 'groups.map(group, "oidc:" + group)' examples: - username: ryan@example.com groups: [ a, b ] expects: username: oidc:ryan@example.com groups: [ oidc:a, oidc:b ] EOF fi if [[ "$use_ldap_upstream" == "yes" ]]; then # Indenting the heredoc by 4 spaces to make it indented the correct amount in the FederationDomain below. cat << EOF >> $fd_file - displayName: "My LDAP IDP 🚀" objectRef: apiGroup: idp.supervisor.pinniped.dev kind: LDAPIdentityProvider name: my-ldap-provider transforms: # these are contrived to exercise all the available features constants: - name: prefix type: string stringValue: "ldap:" - name: onlyIncludeGroupsWithThisPrefix type: string stringValue: "ball-" # pinny belongs to ball-game-players in openldap - name: mustBelongToOneOfThese type: stringList stringListValue: [ ball-admins, seals ] # pinny belongs to seals in openldap - name: additionalAdmins type: stringList stringListValue: [ pinny.ldap@example.com, ryan@example.com ] # pinny's email address in openldap expressions: - type: policy/v1 expression: 'groups.exists(g, g in strListConst.mustBelongToOneOfThese)' message: "Only users in certain kube groups are allowed to authenticate" - type: groups/v1 expression: 'username in strListConst.additionalAdmins ? 
groups + ["ball-admins"] : groups' - type: groups/v1 expression: 'groups.filter(group, group.startsWith(strConst.onlyIncludeGroupsWithThisPrefix))' - type: username/v1 expression: 'strConst.prefix + username' - type: groups/v1 expression: 'groups.map(group, strConst.prefix + group)' examples: - username: ryan@example.com groups: [ ball-developers, seals, non-ball-group ] # allowed to auth because belongs to seals expects: username: ldap:ryan@example.com groups: [ ldap:ball-developers, ldap:ball-admins ] # gets ball-admins because of username, others dropped because they lack "ball-" prefix - username: someone_else@example.com groups: [ ball-developers, ball-admins, non-ball-group ] # allowed to auth because belongs to ball-admins expects: username: ldap:someone_else@example.com groups: [ ldap:ball-developers, ldap:ball-admins ] # seals dropped because it lacks prefix - username: paul@example.com groups: [ not-ball-admins-group, not-seals-group ] # reject because does not belong to any of the required groups expects: rejected: true message: "Only users in certain kube groups are allowed to authenticate" EOF fi if [[ "$use_ad_upstream" == "yes" ]]; then # Indenting the heredoc by 4 spaces to make it indented the correct amount in the FederationDomain below. cat << EOF >> $fd_file - displayName: "My AD IDP" objectRef: apiGroup: idp.supervisor.pinniped.dev kind: ActiveDirectoryIdentityProvider name: my-ad-provider EOF fi # Apply the FederationDomain from the file created above. kubectl apply --namespace "$PINNIPED_TEST_SUPERVISOR_NAMESPACE" -f "$fd_file" echo "Waiting for FederationDomain to initialize or update..." kubectl wait --for=condition=Ready FederationDomain/my-federation-domain -n "$PINNIPED_TEST_SUPERVISOR_NAMESPACE" # Decide if we need to use the proxy settings for certain commands below. if [[ "${PINNIPED_USE_CONTOUR:-}" != "" ]]; then # Using the proxy is not needed with Contour. 
proxy_server="" proxy_except="" proxy_env_vars="" else # Without Contour, we will use the proxy for several commands below. proxy_server="$PINNIPED_TEST_PROXY" proxy_except="127.0.0.1" proxy_env_vars="https_proxy=$proxy_server no_proxy=$proxy_except " fi # Test that the federation domain is working before we proceed. echo "Fetching FederationDomain discovery info via command: ${proxy_env_vars}curl -fLsS --cacert \"$root_ca_crt_path\" \"$issuer/.well-known/openid-configuration\"" https_proxy="$proxy_server" no_proxy="$proxy_except" curl -fLsS --cacert "$root_ca_crt_path" "$issuer/.well-known/openid-configuration" | jq . if [[ "$OSTYPE" == "darwin"* ]]; then certificateAuthorityData=$(cat "$root_ca_crt_path" | base64) else # Linux base64 requires an extra flag to keep the output on one line. certificateAuthorityData=$(cat "$root_ca_crt_path" | base64 -w 0) fi # Make a JWTAuthenticator which respects JWTs from the Supervisor's issuer. # The issuer URL must be accessible from within the cluster for OIDC discovery. cat <kubeconfig-oidc.yaml fi if [[ "$use_ldap_upstream" == "yes" ]]; then echo "Generating LDAP kubeconfig..." https_proxy="$proxy_server" no_proxy="$proxy_except" \ ./pinniped get kubeconfig --oidc-skip-browser $flow_arg --upstream-identity-provider-type ldap >kubeconfig-ldap.yaml fi if [[ "$use_ad_upstream" == "yes" ]]; then echo "Generating AD kubeconfig..." https_proxy="$proxy_server" no_proxy="$proxy_except" \ ./pinniped get kubeconfig --oidc-skip-browser $flow_arg --upstream-identity-provider-type activedirectory >kubeconfig-ad.yaml fi # Clear the local CLI cache to ensure that the kubectl command below will need to perform a fresh login. rm -f "$HOME/.config/pinniped/sessions.yaml" rm -f "$HOME/.config/pinniped/credentials.yaml" echo echo "Ready! 🚀" # These instructions only apply when you are not using Contour and you will need a browser to log in. 
if [[ "${PINNIPED_USE_CONTOUR:-}" == "" && ("$use_oidc_upstream" == "yes" || "$use_flow" == "browser_authcode") ]]; then echo echo "To be able to access the Supervisor URL during login, start Chrome like this:" echo " open -a \"Google Chrome\" --args --proxy-server=\"$proxy_server\"" echo "Note that Chrome must be fully quit before being started with --proxy-server." echo "Then open the login URL shown below in that new Chrome window." echo echo "When prompted for username and password, use these values:" echo fi if [[ "$use_oidc_upstream" == "yes" ]]; then echo " OIDC Username: $PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME" echo " OIDC Password: $PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_PASSWORD" echo fi if [[ "$use_ldap_upstream" == "yes" ]]; then echo " LDAP Username: $PINNIPED_TEST_LDAP_USER_CN" echo " LDAP Password: $PINNIPED_TEST_LDAP_USER_PASSWORD" echo fi if [[ "$use_ad_upstream" == "yes" ]]; then echo " AD Username: $PINNIPED_TEST_AD_USER_USER_PRINCIPAL_NAME" echo " AD Password: $PINNIPED_TEST_AD_USER_PASSWORD" echo fi # Echo the commands that may be used to login and print the identity of the currently logged in user. # Once the CLI has cached your tokens, it will automatically refresh your short-lived credentials whenever # they expire, so you should not be prompted to log in again for the rest of the day. if [[ "$use_oidc_upstream" == "yes" ]]; then echo "To log in using OIDC, run:" echo "PINNIPED_DEBUG=true ${proxy_env_vars}./pinniped whoami --kubeconfig ./kubeconfig-oidc.yaml" echo fi if [[ "$use_ldap_upstream" == "yes" ]]; then echo "To log in using LDAP, run:" echo "PINNIPED_DEBUG=true ${proxy_env_vars}./pinniped whoami --kubeconfig ./kubeconfig-ldap.yaml" echo fi if [[ "$use_ad_upstream" == "yes" ]]; then echo "To log in using AD, run:" echo "PINNIPED_DEBUG=true ${proxy_env_vars}./pinniped whoami --kubeconfig ./kubeconfig-ad.yaml" echo fi