commit b13a8075e4

@@ -31,8 +31,11 @@ COPY --from=build-env /work/out/pinniped-concierge /usr/local/bin/pinniped-conci
 COPY --from=build-env /work/out/pinniped-supervisor /usr/local/bin/pinniped-supervisor
 COPY --from=build-env /work/out/local-user-authenticator /usr/local/bin/local-user-authenticator
 
-# Document the port
-EXPOSE 443
+# Document the ports
+EXPOSE 8080 8443
 
+# Run as non-root for security posture
+USER 1001:1001
+
 # Set the entrypoint
 ENTRYPOINT ["/usr/local/bin/pinniped-concierge"]
@@ -375,7 +375,7 @@ func run() error {
 klog.InfoS("controllers are ready")
 
 //nolint: gosec // Intentionally binding to all network interfaces.
-l, err := net.Listen("tcp", ":443")
+l, err := net.Listen("tcp", ":8443")
 if err != nil {
 return fmt.Errorf("cannot create listener: %w", err)
 }
@@ -198,7 +198,7 @@ func run(serverInstallationNamespace string, cfg *supervisor.Config) error {
 )
 
 //nolint: gosec // Intentionally binding to all network interfaces.
-httpListener, err := net.Listen("tcp", ":80")
+httpListener, err := net.Listen("tcp", ":8080")
 if err != nil {
 return fmt.Errorf("cannot create listener: %w", err)
 }
@@ -206,12 +206,12 @@ func run(serverInstallationNamespace string, cfg *supervisor.Config) error {
 start(ctx, httpListener, oidProvidersManager)
 
 //nolint: gosec // Intentionally binding to all network interfaces.
-httpsListener, err := tls.Listen("tcp", ":443", &tls.Config{
+httpsListener, err := tls.Listen("tcp", ":8443", &tls.Config{
 MinVersion: tls.VersionTLS12, // Allow v1.2 because clients like the default `curl` on MacOS don't support 1.3 yet.
 GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
 cert := dynamicTLSCertProvider.GetTLSCert(strings.ToLower(info.ServerName))
 defaultCert := dynamicTLSCertProvider.GetDefaultTLSCert()
-klog.InfoS("GetCertificate called for port 443",
+klog.InfoS("GetCertificate called for port 8443",
 "info.ServerName", info.ServerName,
 "foundSNICert", cert != nil,
 "foundDefaultCert", defaultCert != nil,
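The HTTPS listener above selects its serving certificate per TLS handshake. As a minimal sketch of that pattern (not the commit's actual code), `tls.Listen` can be combined with a `GetCertificate` callback that looks up a certificate by the client's lowercased SNI name and falls back to a default; the `certs` map and `defaultCert` parameter here are illustrative stand-ins for the supervisor's dynamic cert provider:

```go
package sketch

import (
	"crypto/tls"
	"net"
	"strings"
)

// listenWithSNI returns a TLS listener on :8443 that picks a certificate per
// handshake based on the client's SNI server name, falling back to a default.
func listenWithSNI(certs map[string]*tls.Certificate, defaultCert *tls.Certificate) (net.Listener, error) {
	return tls.Listen("tcp", ":8443", &tls.Config{
		MinVersion: tls.VersionTLS12,
		GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
			// SNI names are case-insensitive, so normalize before the lookup.
			if cert, ok := certs[strings.ToLower(info.ServerName)]; ok {
				return cert, nil
			}
			// A nil default makes the handshake fail, which is reasonable
			// when no certificate has been configured yet.
			return defaultCert, nil
		},
	})
}
```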
@@ -86,6 +86,9 @@ spec:
 annotations:
 scheduler.alpha.kubernetes.io/critical-pod: ""
 spec:
+securityContext:
+runAsUser: 1001
+runAsGroup: 1001
 serviceAccountName: #@ defaultResourceName()
 #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
 imagePullSecrets:
@@ -113,7 +116,7 @@ spec:
 livenessProbe:
 httpGet:
 path: /healthz
-port: 443
+port: 8443
 scheme: HTTPS
 initialDelaySeconds: 2
 timeoutSeconds: 15
@@ -122,7 +125,7 @@ spec:
 readinessProbe:
 httpGet:
 path: /healthz
-port: 443
+port: 8443
 scheme: HTTPS
 initialDelaySeconds: 2
 timeoutSeconds: 3
@@ -173,7 +176,7 @@ spec:
 ports:
 - protocol: TCP
 port: 443
-targetPort: 443
+targetPort: 8443
 ---
 apiVersion: apiregistration.k8s.io/v1
 kind: APIService
@@ -47,6 +47,9 @@ spec:
 labels:
 app: local-user-authenticator
 spec:
+securityContext:
+runAsUser: 1001
+runAsGroup: 1001
 serviceAccountName: local-user-authenticator
 #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
 imagePullSecrets:
@@ -77,4 +80,4 @@ spec:
 ports:
 - protocol: TCP
 port: 443
-targetPort: 443
+targetPort: 8443
@@ -47,7 +47,7 @@ The most common ways are:
 
 1. Define an [`Ingress` resource](https://kubernetes.io/docs/concepts/services-networking/ingress/) with TLS certificates.
 In this case, the ingress will terminate TLS. Typically, the ingress will then talk plain HTTP to its backend,
-which would be a NodePort or LoadBalancer Service in front of the HTTP port 80 of the Supervisor pods.
+which would be a NodePort or LoadBalancer Service in front of the HTTP port 8080 of the Supervisor pods.
 
 The required configuration of the Ingress is specific to your cluster's Ingress Controller, so please refer to the
 documentation from your Kubernetes provider. If you are using a cluster from a cloud provider, then you'll probably
@@ -60,10 +60,10 @@ The most common ways are:
 1. Or, define a [TCP LoadBalancer Service](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer)
 which is a layer 4 load balancer and does not terminate TLS. In this case, the Supervisor app will need to be
 configured with TLS certificates and will terminate the TLS connection itself (see the section about
-OIDCProviderConfig below). The LoadBalancer Service should be configured to use the HTTPS port 443 of
+OIDCProviderConfig below). The LoadBalancer Service should be configured to use the HTTPS port 8443 of
 the Supervisor pods as its `targetPort`.
 
-*Warning:* Do not expose the Supervisor's port 80 to the public. It would not be secure for the OIDC protocol
+*Warning:* Do not expose the Supervisor's port 8080 to the public. It would not be secure for the OIDC protocol
 to use HTTP, because the user's secret OIDC tokens would be transmitted across the network without encryption.
 
 1. Or, expose the Supervisor app using a Kubernetes service mesh technology, e.g. [Istio](https://istio.io/).
@@ -79,7 +79,7 @@ so if you choose to use an Ingress then you'll need to create that separately af
 
 #### Example: Using a LoadBalancer Service
 
-This is an example of creating a LoadBalancer Service to expose port 443 of the Supervisor app outside the cluster.
+This is an example of creating a LoadBalancer Service to expose port 8443 of the Supervisor app outside the cluster.
 
 ```yaml
 apiVersion: v1
@@ -96,7 +96,7 @@ spec:
 ports:
 - protocol: TCP
 port: 443
-targetPort: 443
+targetPort: 8443
 ```
 
 #### Example: Using a NodePort Service
@@ -127,7 +127,7 @@ spec:
 ports:
 - protocol: TCP
 port: 80
-targetPort: 80
+targetPort: 8080
 nodePort: 31234 # This is the port that you would forward to the kind host. Or omit this key for a random port.
 ```
 
@@ -60,6 +60,9 @@ spec:
 metadata:
 labels: #@ defaultLabel()
 spec:
+securityContext:
+runAsUser: 1001
+runAsGroup: 1001
 serviceAccountName: #@ defaultResourceName()
 #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
 imagePullSecrets:
@@ -87,14 +90,14 @@ spec:
 - name: podinfo
 mountPath: /etc/podinfo
 ports:
-- containerPort: 80
+- containerPort: 8080
 protocol: TCP
-- containerPort: 443
+- containerPort: 8443
 protocol: TCP
 livenessProbe:
 httpGet:
 path: /healthz
-port: 80
+port: 8080
 scheme: HTTP
 initialDelaySeconds: 2
 timeoutSeconds: 15
@@ -103,7 +106,7 @@ spec:
 readinessProbe:
 httpGet:
 path: /healthz
-port: 80
+port: 8080
 scheme: HTTP
 initialDelaySeconds: 2
 timeoutSeconds: 3
@@ -21,7 +21,7 @@ spec:
 - name: http
 protocol: TCP
 port: #@ data.values.service_http_nodeport_port
-targetPort: 80
+targetPort: 8080
 #@ if data.values.service_http_nodeport_nodeport:
 nodePort: #@ data.values.service_http_nodeport_nodeport
 #@ end
@@ -30,7 +30,7 @@ spec:
 - name: https
 protocol: TCP
 port: #@ data.values.service_https_nodeport_port
-targetPort: 443
+targetPort: 8443
 #@ if data.values.service_https_nodeport_nodeport:
 nodePort: #@ data.values.service_https_nodeport_nodeport
 #@ end
@@ -53,13 +53,13 @@ spec:
 - name: http
 protocol: TCP
 port: #@ data.values.service_http_clusterip_port
-targetPort: 80
+targetPort: 8080
 #@ end
 #@ if data.values.service_https_clusterip_port:
 - name: https
 protocol: TCP
 port: #@ data.values.service_https_clusterip_port
-targetPort: 443
+targetPort: 8443
 #@ end
 #@ end
 
@@ -82,12 +82,12 @@ spec:
 - name: http
 protocol: TCP
 port: #@ data.values.service_http_loadbalancer_port
-targetPort: 80
+targetPort: 8080
 #@ end
 #@ if data.values.service_https_loadbalancer_port:
 - name: https
 protocol: TCP
 port: #@ data.values.service_https_loadbalancer_port
-targetPort: 443
+targetPort: 8443
 #@ end
 #@ end
@@ -40,14 +40,14 @@ image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"
 #! An HTTP service should not be exposed outside the cluster. It would not be secure to serve OIDC endpoints to end users via HTTP.
 #! Setting any of these values means that a Service of that type will be created.
 #! Note that all port numbers should be numbers (not strings), i.e. use ytt's `--data-value-yaml` instead of `--data-value`.
-service_http_nodeport_port: #! when specified, creates a NodePort Service with this `port` value, with port 80 as its `targetPort`; e.g. 31234
+service_http_nodeport_port: #! when specified, creates a NodePort Service with this `port` value, with port 8080 as its `targetPort`; e.g. 31234
 service_http_nodeport_nodeport: #! the `nodePort` value of the NodePort Service, optional when `service_http_nodeport_port` is specified; e.g. 31234
-service_http_loadbalancer_port: #! when specified, creates a LoadBalancer Service with this `port` value, with port 80 as its `targetPort`; e.g. 443
-service_http_clusterip_port: #! when specified, creates a ClusterIP Service with this `port` value, with port 80 as its `targetPort`; e.g. 443
-service_https_nodeport_port: #! when specified, creates a NodePort Service with this `port` value, with port 443 as its `targetPort`; e.g. 31243
+service_http_loadbalancer_port: #! when specified, creates a LoadBalancer Service with this `port` value, with port 8080 as its `targetPort`; e.g. 8443
+service_http_clusterip_port: #! when specified, creates a ClusterIP Service with this `port` value, with port 8080 as its `targetPort`; e.g. 8443
+service_https_nodeport_port: #! when specified, creates a NodePort Service with this `port` value, with port 8443 as its `targetPort`; e.g. 31243
 service_https_nodeport_nodeport: #! the `nodePort` value of the NodePort Service, optional when `service_http_nodeport_port` is specified; e.g. 31243
-service_https_loadbalancer_port: #! when specified, creates a LoadBalancer Service with this `port` value, with port 443 as its `targetPort`; e.g. 443
-service_https_clusterip_port: #! when specified, creates a ClusterIP Service with this `port` value, with port 443 as its `targetPort`; e.g. 443
+service_https_loadbalancer_port: #! when specified, creates a LoadBalancer Service with this `port` value, with port 8443 as its `targetPort`; e.g. 8443
+service_https_clusterip_port: #! when specified, creates a ClusterIP Service with this `port` value, with port 8443 as its `targetPort`; e.g. 8443
 #! The `loadBalancerIP` value of the LoadBalancer Service.
 #! Ignored unless service_http_loadbalancer_port and/or service_https_loadbalancer_port are provided.
 #! Optional.
@@ -8,7 +8,10 @@ FROM debian:10.5-slim
 COPY build/pinniped-concierge /usr/local/bin/pinniped-concierge
 
 # Document the port
-EXPOSE 443
+EXPOSE 8443
 
+# Run as non-root for security posture
+USER 1001:1001
+
 # Set the entrypoint
 ENTRYPOINT ["/usr/local/bin/pinniped-concierge"]
@@ -8,7 +8,10 @@ FROM debian:10.5-slim
 COPY build/local-user-authenticator /usr/local/bin/local-user-authenticator
 
 # Document the port
-EXPOSE 443
+EXPOSE 8443
 
+# Run as non-root for security posture
+USER 1001:1001
+
 # Set the entrypoint
 ENTRYPOINT ["/usr/local/bin/local-user-authenticator"]
@@ -8,7 +8,10 @@ FROM debian:10.5-slim
 COPY build/pinniped-supervisor /usr/local/bin/pinniped-supervisor
 
 # Document the port
-EXPOSE 443
+EXPOSE 8080 8443
 
+# Run as non-root for security posture
+USER 1001:1001
+
 # Set the entrypoint
 ENTRYPOINT ["/usr/local/bin/pinniped-supervisor"]
@@ -173,6 +173,7 @@ func getAggregatedAPIServerConfig(
 )
 recommendedOptions.Etcd = nil // turn off etcd storage because we don't need it yet
 recommendedOptions.SecureServing.ServerCert.GeneratedCert = dynamicCertProvider
+recommendedOptions.SecureServing.BindPort = 8443 // Don't run on default 443 because that requires root
 
 serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs)
 // Note that among other things, this ApplyTo() function copies
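The new `BindPort` setting exists because, on Linux, binding a port below 1024 requires root or the CAP_NET_BIND_SERVICE capability, and the images now run as `USER 1001:1001`. A hypothetical standalone demo of the failure mode (not part of the commit):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// As a non-root user without CAP_NET_BIND_SERVICE, binding :443 fails
	// with a permission error, while :8443 succeeds.
	for _, addr := range []string{":443", ":8443"} {
		l, err := net.Listen("tcp", addr)
		if err != nil {
			fmt.Printf("listen %s: %v\n", addr, err)
			continue
		}
		fmt.Printf("listen %s: ok\n", addr)
		l.Close()
	}
}
```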
@@ -116,7 +116,7 @@ func (c *createrController) Sync(ctx controllerlib.Context) error {
 return err
 }
 if agentPod == nil {
-agentPod = newAgentPod(controllerManagerPod, c.agentPodConfig.PodTemplate())
+agentPod = c.agentPodConfig.newAgentPod(controllerManagerPod)
 
 klog.InfoS(
 "creating agent pod",
@@ -70,7 +70,7 @@ func (c *deleterController) Sync(ctx controllerlib.Context) error {
 return err
 }
 if controllerManagerPod == nil ||
-!isAgentPodUpToDate(agentPod, newAgentPod(controllerManagerPod, c.agentPodConfig.PodTemplate())) {
+!isAgentPodUpToDate(agentPod, c.agentPodConfig.newAgentPod(controllerManagerPod)) {
 klog.InfoS("deleting agent pod", "pod", klog.KObj(agentPod))
 err := c.k8sClient.
 CoreV1().
@@ -264,7 +264,8 @@ func TestDeleterControllerSync(t *testing.T) {
 when("the agent pod is out of sync via automount service account token", func() {
 it.Before(func() {
 updatedAgentPod := agentPod.DeepCopy()
-updatedAgentPod.Spec.AutomountServiceAccountToken = boolPtr(true)
+t := true
+updatedAgentPod.Spec.AutomountServiceAccountToken = &t
 r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
 r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
 })
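A side note on the `t := true` change above: Go has no address-of-literal syntax, so `&true` does not compile; either a named local variable or a helper like the removed `boolPtr` is required. A tiny illustration:

```go
package main

import "fmt"

func main() {
	// &true is a compile error in Go, so take the address of a variable instead.
	t := true
	var automount *bool = &t
	fmt.Println(*automount) // prints: true
}
```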
@@ -312,6 +313,59 @@ func TestDeleterControllerSync(t *testing.T) {
 })
 })
 
+when("the agent pod is out of sync with the template via runAsUser", func() {
+it.Before(func() {
+updatedAgentPod := agentPod.DeepCopy()
+notRoot := int64(1234)
+updatedAgentPod.Spec.SecurityContext.RunAsUser = &notRoot
+r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
+r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
+})
+
+it("deletes the agent pod", func() {
+startInformersAndController()
+err := controllerlib.TestSync(t, subject, *syncContext)
+
+r.NoError(err)
+requireAgentPodWasDeleted()
+})
+})
+
+when("the agent pod is out of sync with the template via runAsGroup", func() {
+it.Before(func() {
+updatedAgentPod := agentPod.DeepCopy()
+notRoot := int64(1234)
+updatedAgentPod.Spec.SecurityContext.RunAsGroup = &notRoot
+r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
+r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
+})
+
+it("deletes the agent pod", func() {
+startInformersAndController()
+err := controllerlib.TestSync(t, subject, *syncContext)
+
+r.NoError(err)
+requireAgentPodWasDeleted()
+})
+})
+
+when("the agent pod is out of sync with the template via having a nil SecurityContext", func() {
+it.Before(func() {
+updatedAgentPod := agentPod.DeepCopy()
+updatedAgentPod.Spec.SecurityContext = nil
+r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
+r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
+})
+
+it("deletes the agent pod", func() {
+startInformersAndController()
+err := controllerlib.TestSync(t, subject, *syncContext)
+
+r.NoError(err)
+requireAgentPodWasDeleted()
+})
+})
+
 when("the agent pod is out of sync with the template via labels", func() {
 when("an additional label's value was changed", func() {
 it.Before(func() {
@@ -95,8 +95,11 @@ func (c *AgentPodConfig) AgentSelector() labels.Selector {
 return labels.SelectorFromSet(map[string]string{agentPodLabelKey: agentPodLabelValue})
 }
 
-func (c *AgentPodConfig) PodTemplate() *corev1.Pod {
+func (c *AgentPodConfig) newAgentPod(controllerManagerPod *corev1.Pod) *corev1.Pod {
 terminateImmediately := int64(0)
+rootID := int64(0)
+f := false
+falsePtr := &f
 
 imagePullSecrets := []corev1.LocalObjectReference{}
 for _, imagePullSecret := range c.ContainerImagePullSecrets {
@@ -108,11 +111,15 @@ func (c *AgentPodConfig) PodTemplate() *corev1.Pod {
 )
 }
 
-pod := &corev1.Pod{
+return &corev1.Pod{
 ObjectMeta: metav1.ObjectMeta{
-Name: c.PodNamePrefix,
+Name: fmt.Sprintf("%s%s", c.PodNamePrefix, hash(controllerManagerPod)),
 Namespace: c.Namespace,
 Labels: c.Labels(),
+Annotations: map[string]string{
+controllerManagerNameAnnotationKey: controllerManagerPod.Name,
+controllerManagerUIDAnnotationKey: string(controllerManagerPod.UID),
+},
 },
 Spec: corev1.PodSpec{
 TerminationGracePeriodSeconds: &terminateImmediately,
@@ -123,6 +130,7 @@ func (c *AgentPodConfig) PodTemplate() *corev1.Pod {
 Image: c.ContainerImage,
 ImagePullPolicy: corev1.PullIfNotPresent,
 Command: []string{"/bin/sleep", "infinity"},
+VolumeMounts: controllerManagerPod.Spec.Containers[0].VolumeMounts,
 Resources: corev1.ResourceRequirements{
 Limits: corev1.ResourceList{
 corev1.ResourceMemory: resource.MustParse("16Mi"),
@@ -135,37 +143,20 @@ func (c *AgentPodConfig) PodTemplate() *corev1.Pod {
 },
 },
 },
+Volumes: controllerManagerPod.Spec.Volumes,
+RestartPolicy: corev1.RestartPolicyNever,
+NodeSelector: controllerManagerPod.Spec.NodeSelector,
+AutomountServiceAccountToken: falsePtr,
+NodeName: controllerManagerPod.Spec.NodeName,
+Tolerations: controllerManagerPod.Spec.Tolerations,
+// We need to run the agent pod as root since the file permissions
+// on the cluster keypair usually restricts access to only root.
+SecurityContext: &corev1.PodSecurityContext{
+RunAsUser: &rootID,
+RunAsGroup: &rootID,
+},
 },
 }
-return pod
-}
-
-func newAgentPod(
-controllerManagerPod *corev1.Pod,
-template *corev1.Pod,
-) *corev1.Pod {
-agentPod := template.DeepCopy()
-
-agentPod.Name = fmt.Sprintf("%s%s", agentPod.Name, hash(controllerManagerPod))
-
-// It would be nice to use the OwnerReferences field here, but the agent pod is most likely in a
-// different namespace than the kube-controller-manager pod, and therefore that breaks the
-// OwnerReferences contract (see metav1.OwnerReference doc).
-if agentPod.Annotations == nil {
-agentPod.Annotations = make(map[string]string)
-}
-agentPod.Annotations[controllerManagerNameAnnotationKey] = controllerManagerPod.Name
-agentPod.Annotations[controllerManagerUIDAnnotationKey] = string(controllerManagerPod.UID)
-
-agentPod.Spec.Containers[0].VolumeMounts = controllerManagerPod.Spec.Containers[0].VolumeMounts
-agentPod.Spec.Volumes = controllerManagerPod.Spec.Volumes
-agentPod.Spec.RestartPolicy = corev1.RestartPolicyNever
-agentPod.Spec.NodeSelector = controllerManagerPod.Spec.NodeSelector
-agentPod.Spec.AutomountServiceAccountToken = boolPtr(false)
-agentPod.Spec.NodeName = controllerManagerPod.Spec.NodeName
-agentPod.Spec.Tolerations = controllerManagerPod.Spec.Tolerations
-
-return agentPod
 }
 
 func isAgentPodUpToDate(actualAgentPod, expectedAgentPod *corev1.Pod) bool {
@@ -177,6 +168,11 @@ func isAgentPodUpToDate(actualAgentPod, expectedAgentPod *corev1.Pod) bool {
 break
 }
 }
+
+if actualAgentPod.Spec.SecurityContext == nil {
+return false
+}
+
 return requiredLabelsAllPresentWithCorrectValues &&
 equality.Semantic.DeepEqual(
 actualAgentPod.Spec.Containers[0].VolumeMounts,
@@ -217,6 +213,14 @@ func isAgentPodUpToDate(actualAgentPod, expectedAgentPod *corev1.Pod) bool {
 equality.Semantic.DeepEqual(
 actualAgentPod.Spec.Tolerations,
 expectedAgentPod.Spec.Tolerations,
+) &&
+equality.Semantic.DeepEqual(
+actualAgentPod.Spec.SecurityContext.RunAsUser,
+expectedAgentPod.Spec.SecurityContext.RunAsUser,
+) &&
+equality.Semantic.DeepEqual(
+actualAgentPod.Spec.SecurityContext.RunAsGroup,
+expectedAgentPod.Spec.SecurityContext.RunAsGroup,
 )
 }
 
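The added checks use `equality.Semantic.DeepEqual` from `k8s.io/apimachinery/pkg/api/equality`, which compares the values behind pointers rather than pointer identity; the nil guard added in the previous hunk is what makes these field accesses safe. A small sketch with hypothetical values (not from the commit):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
)

func main() {
	a := int64(1001)
	b := int64(1001)
	actual := &corev1.PodSecurityContext{RunAsUser: &a}
	expected := &corev1.PodSecurityContext{RunAsUser: &b}

	// DeepEqual follows the pointers, so two distinct *int64 values with
	// equal contents compare as equal.
	fmt.Println(equality.Semantic.DeepEqual(actual.RunAsUser, expected.RunAsUser)) // true

	// Accessing fields of a nil *PodSecurityContext would panic, which is
	// why the controller returns early when the actual pod has none.
	var missing *corev1.PodSecurityContext
	fmt.Println(missing == nil) // true
}
```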
@@ -326,8 +330,6 @@ func strategyError(clock clock.Clock, err error) configv1alpha1.CredentialIssuer
 }
 }
 
-func boolPtr(b bool) *bool { return &b }
-
 func hash(controllerManagerPod *corev1.Pod) string {
 // FNV should be faster than SHA, and we don't care about hash-reversibility here, and Kubernetes
 // uses FNV for their pod templates, so should be good enough for us?
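For context on `hash` and the expected `fbb0addd` suffix in the test fixtures below: Go's `hash/fnv` package provides FNV-1a. A minimal sketch of hex-encoding a 32-bit FNV-1a digest; that the real function hashes exactly the controller-manager pod's UID is an assumption based on the fixture comment "fnv 32a hash of controller-manager uid":

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fnv32a returns the hex-encoded FNV-1a 32-bit hash of s.
func fnv32a(s string) string {
	h := fnv.New32a()
	_, _ = h.Write([]byte(s)) // Write on an fnv hash never returns an error
	return fmt.Sprintf("%x", h.Sum32())
}

func main() {
	fmt.Println(fnv32a("some-controller-manager-uid")) // prints an 8-hex-digit digest
}
```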
@@ -70,6 +70,7 @@ func exampleControllerManagerAndAgentPods(
 }
 
 zero := int64(0)
+f := false
 
 // fnv 32a hash of controller-manager uid
 controllerManagerPodHash := "fbb0addd"
@@ -114,10 +115,11 @@ func exampleControllerManagerAndAgentPods(
 },
 },
 RestartPolicy: corev1.RestartPolicyNever,
-AutomountServiceAccountToken: boolPtr(false),
+AutomountServiceAccountToken: &f,
 NodeName: controllerManagerPod.Spec.NodeName,
 NodeSelector: controllerManagerPod.Spec.NodeSelector,
 Tolerations: controllerManagerPod.Spec.Tolerations,
+SecurityContext: &corev1.PodSecurityContext{RunAsUser: &zero, RunAsGroup: &zero},
 },
 }
 
@@ -32,13 +32,13 @@ import (
 "go.pinniped.dev/test/library"
 )
 
-// This test is intended to exercise the supervisor's HTTP port 80. It can either access it directly via
+// This test is intended to exercise the supervisor's HTTP port 8080. It can either access it directly via
 // the env.SupervisorHTTPAddress setting, or it can access it indirectly through a TLS-enabled Ingress which
-// uses the supervisor's port 80 as its backend via the env.SupervisorHTTPSIngressAddress and
+// uses the supervisor's port 8080 as its backend via the env.SupervisorHTTPSIngressAddress and
 // env.SupervisorHTTPSIngressCABundle settings, or it can exercise it both ways when all of those
 // env settings are present.
 //
-// Testing talking to the supervisor's port 443 where the supervisor is terminating TLS itself is
+// Testing talking to the supervisor's port 8443 where the supervisor is terminating TLS itself is
 // handled by the others tests in this file.
 func TestSupervisorOIDCDiscovery(t *testing.T) {
 env := library.IntegrationEnv(t)
@@ -155,7 +155,7 @@ func TestSupervisorTLSTerminationWithSNI(t *testing.T) {
 temporarilyRemoveAllOIDCProviderConfigsAndDefaultTLSCertSecret(ctx, t, ns, defaultTLSCertSecretName(env), pinnipedClient, kubeClient)
 
 scheme := "https"
-address := env.SupervisorHTTPSAddress // hostname and port for direct access to the supervisor's port 443
+address := env.SupervisorHTTPSAddress // hostname and port for direct access to the supervisor's port 8443
 
 hostname1 := strings.Split(address, ":")[0]
 issuer1 := fmt.Sprintf("%s://%s/issuer1", scheme, address)
@@ -222,12 +222,12 @@ func TestSupervisorTLSTerminationWithDefaultCerts(t *testing.T) {
 temporarilyRemoveAllOIDCProviderConfigsAndDefaultTLSCertSecret(ctx, t, ns, defaultTLSCertSecretName(env), pinnipedClient, kubeClient)
 
 scheme := "https"
-address := env.SupervisorHTTPSAddress // hostname and port for direct access to the supervisor's port 443
+address := env.SupervisorHTTPSAddress // hostname and port for direct access to the supervisor's port 8443
 
 hostAndPortSegments := strings.Split(address, ":")
 // hostnames are case-insensitive, so test mis-matching the case of the issuer URL and the request URL
 hostname := strings.ToLower(hostAndPortSegments[0])
-port := "443"
+port := "8443"
 if len(hostAndPortSegments) > 1 {
 port = hostAndPortSegments[1]
 }
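The manual `strings.Split` fallback in the hunk above exists because `net.SplitHostPort` returns an error when the address carries no port. A standalone sketch of the same parsing, with a hypothetical hostname:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// net.SplitHostPort fails on an address without a port, which is why the
	// test splits manually and falls back to the default port 8443.
	if _, _, err := net.SplitHostPort("pinniped.example.com"); err != nil {
		fmt.Println("SplitHostPort:", err) // missing port in address
	}

	hostAndPort := strings.Split("pinniped.example.com", ":")
	host, port := hostAndPort[0], "8443"
	if len(hostAndPort) > 1 {
		port = hostAndPort[1]
	}
	fmt.Println(host, port) // pinniped.example.com 8443
}
```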