Merge pull request #738 from mattmoyer/distroless
Switch to a slimmer distroless base image
Commit 01ddc7ac36

Dockerfile (30 lines changed)
@@ -16,26 +16,18 @@ RUN \
     --mount=type=cache,target=/cache/gocache \
     --mount=type=cache,target=/cache/gomodcache \
     mkdir out && \
-    GOCACHE=/cache/gocache \
-    GOMODCACHE=/cache/gomodcache \
-    CGO_ENABLED=0 \
-    GOOS=linux \
-    GOARCH=amd64 \
-    go build -v -ldflags "$(hack/get-ldflags.sh)" -o out \
-      ./cmd/pinniped-concierge/... \
-      ./cmd/pinniped-supervisor/... \
-      ./cmd/local-user-authenticator/...
+    export GOCACHE=/cache/gocache GOMODCACHE=/cache/gomodcache CGO_ENABLED=0 GOOS=linux GOARCH=amd64 && \
+    go build -v -ldflags "$(hack/get-ldflags.sh) -w -s" -o /usr/local/bin/pinniped-concierge-kube-cert-agent ./cmd/pinniped-concierge-kube-cert-agent/main.go && \
+    go build -v -ldflags "$(hack/get-ldflags.sh) -w -s" -o /usr/local/bin/pinniped-server ./cmd/pinniped-server/main.go && \
+    ln -s /usr/local/bin/pinniped-server /usr/local/bin/pinniped-concierge && \
+    ln -s /usr/local/bin/pinniped-server /usr/local/bin/pinniped-supervisor && \
+    ln -s /usr/local/bin/pinniped-server /usr/local/bin/local-user-authenticator
 
-# Use a Debian slim image to grab a reasonable default CA bundle.
-FROM debian:10.10-slim AS get-ca-bundle-env
-RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/* /var/cache/debconf/*
-
-# Use a runtime image based on Debian slim.
-FROM debian:10.10-slim
-COPY --from=get-ca-bundle-env /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
-
-# Copy the binaries from the build-env stage.
-COPY --from=build-env /work/out/ /usr/local/bin/
+# Use a distroless runtime image with CA certificates, timezone data, and not much else.
+FROM gcr.io/distroless/static:nonroot@sha256:c9f9b040044cc23e1088772814532d90adadfa1b86dcba17d07cb567db18dc4e
+
+# Copy the server binary from the build-env stage.
+COPY --from=build-env /usr/local/bin /usr/local/bin
 
 # Document the ports
 EXPOSE 8080 8443

@@ -44,4 +36,4 @@ EXPOSE 8080 8443
 USER 1001:1001
 
 # Set the entrypoint
-ENTRYPOINT ["/usr/local/bin/pinniped-concierge"]
+ENTRYPOINT ["/usr/local/bin/pinniped-server"]
55
cmd/pinniped-concierge-kube-cert-agent/main.go
Normal file
55
cmd/pinniped-concierge-kube-cert-agent/main.go
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
// Package main is the combined entrypoint for the Pinniped "kube-cert-agent" component.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
//nolint: gochecknoglobals // these are swapped during unit tests.
|
||||||
|
var (
|
||||||
|
getenv = os.Getenv
|
||||||
|
fail = log.Fatalf
|
||||||
|
sleep = time.Sleep
|
||||||
|
out = io.Writer(os.Stdout)
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if len(os.Args) < 2 {
|
||||||
|
fail("missing subcommand")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch os.Args[1] {
|
||||||
|
case "sleep":
|
||||||
|
sleep(math.MaxInt64)
|
||||||
|
case "print":
|
||||||
|
certBytes, err := ioutil.ReadFile(getenv("CERT_PATH"))
|
||||||
|
if err != nil {
|
||||||
|
fail("could not read CERT_PATH: %v", err)
|
||||||
|
}
|
||||||
|
keyBytes, err := ioutil.ReadFile(getenv("KEY_PATH"))
|
||||||
|
if err != nil {
|
||||||
|
fail("could not read KEY_PATH: %v", err)
|
||||||
|
}
|
||||||
|
if err := json.NewEncoder(out).Encode(&struct {
|
||||||
|
Cert string `json:"tls.crt"`
|
||||||
|
Key string `json:"tls.key"`
|
||||||
|
}{
|
||||||
|
Cert: base64.StdEncoding.EncodeToString(certBytes),
|
||||||
|
Key: base64.StdEncoding.EncodeToString(keyBytes),
|
||||||
|
}); err != nil {
|
||||||
|
fail("failed to write output: %v", err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
fail("invalid subcommand %q", os.Args[1])
|
||||||
|
}
|
||||||
|
}
|
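The "print" subcommand above writes a single JSON document to stdout whose "tls.crt" and "tls.key" fields hold the base64-encoded certificate and key. As a rough standalone illustration only (not part of this commit), a consumer of that output could decode it back into PEM bytes as sketched below; the Concierge's real consumer is the loadSigningKey() change further down in this diff, and the sample JSON values here are just the "test-cert"/"test-key" strings used by the tests in this PR.

// Hypothetical standalone sketch, assuming only the JSON shape shown above.
package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "log"
)

func main() {
    // Example output as produced by the agent's "print" subcommand.
    outputJSON := `{"tls.crt": "dGVzdC1jZXJ0", "tls.key": "dGVzdC1rZXk="}`

    var output struct {
        Cert string `json:"tls.crt"`
        Key  string `json:"tls.key"`
    }
    if err := json.Unmarshal([]byte(outputJSON), &output); err != nil {
        log.Fatalf("failed to decode agent output: %v", err)
    }

    certPEM, err := base64.StdEncoding.DecodeString(output.Cert)
    if err != nil {
        log.Fatalf("failed to decode cert base64: %v", err)
    }
    keyPEM, err := base64.StdEncoding.DecodeString(output.Key)
    if err != nil {
        log.Fatalf("failed to decode key base64: %v", err)
    }

    fmt.Printf("cert bytes: %q\nkey bytes: %q\n", certPEM, keyPEM)
}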
cmd/pinniped-concierge-kube-cert-agent/main_test.go (new file, 128 lines)

// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import (
    "bytes"
    "fmt"
    "log"
    "os"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

type errWriter struct{}

func (e errWriter) Write([]byte) (int, error) { return 0, fmt.Errorf("some write error") }

func TestEntrypoint(t *testing.T) {
    for _, tt := range []struct {
        name        string
        args        []string
        env         map[string]string
        failOutput  bool
        wantSleep   time.Duration
        wantLog     string
        wantOutJSON string
        wantFail    bool
    }{
        {
            name:     "missing args",
            args:     []string{},
            wantLog:  "missing subcommand\n",
            wantFail: true,
        },
        {
            name:     "invalid subcommand",
            args:     []string{"/path/to/binary", "invalid"},
            wantLog:  "invalid subcommand \"invalid\"\n",
            wantFail: true,
        },
        {
            name:      "valid sleep",
            args:      []string{"/path/to/binary", "sleep"},
            wantSleep: 2562047*time.Hour + 47*time.Minute + 16*time.Second + 854775807*time.Nanosecond, // math.MaxInt64 nanoseconds, approximately 290 years
        },
        {
            name: "missing cert file",
            args: []string{"/path/to/binary", "print"},
            env: map[string]string{
                "CERT_PATH": "./does/not/exist",
                "KEY_PATH":  "./testdata/test.key",
            },
            wantFail: true,
            wantLog:  "could not read CERT_PATH: open ./does/not/exist: no such file or directory\n",
        },
        {
            name: "missing key file",
            args: []string{"/path/to/binary", "print"},
            env: map[string]string{
                "CERT_PATH": "./testdata/test.crt",
                "KEY_PATH":  "./does/not/exist",
            },
            wantFail: true,
            wantLog:  "could not read KEY_PATH: open ./does/not/exist: no such file or directory\n",
        },
        {
            name: "fail to write output",
            args: []string{"/path/to/binary", "print"},
            env: map[string]string{
                "CERT_PATH": "./testdata/test.crt",
                "KEY_PATH":  "./testdata/test.key",
            },
            failOutput: true,
            wantFail:   true,
            wantLog:    "failed to write output: some write error\n",
        },
        {
            name: "successful print",
            args: []string{"/path/to/binary", "print"},
            env: map[string]string{
                "CERT_PATH": "./testdata/test.crt",
                "KEY_PATH":  "./testdata/test.key",
            },
            wantOutJSON: `{
                "tls.crt": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01EY3lOVEl4TURReE9Gb1hEVE13TURjeU16SXhNRFF4T0Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTDNLCmhZdjJnSVExRHd6aDJjV01pZCtvZkFudkxJZlYyWHY2MXZUTEdwclVJK1hVcUI0L2d0ZjZYNlVObjBMZXR0Mm4KZDhwNHd5N2h3NzNoVS9nZ2R2bVdKdnFCclNqYzNKR2Z5K2tqNjZmS1hYK1BUbGJMN1Fid2lSdmNTcUlYSVdsVgpsSEh4RUNXckVEOGpDdWx3L05WcWZvb2svaDVpTlVDVDl5c3dTSnIvMGZJbWlWbm9UbElvRVlHMmVDTmVqWjVjCmczOXVEM1pUcWQ5WnhXd1NMTG5JKzJrcEpuWkJQY2QxWlE4QVFxekRnWnRZUkNxYWNuNWdja1FVS1pXS1FseG8KRWZ0NmcxWEhKb3VBV0FadzdoRXRrMHY4ckcwL2VLRjd3YW14Rmk2QkZWbGJqV0JzQjRUOXJBcGJkQldUS2VDSgpIdjhmdjVSTUZTenBUM3V6VE84Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFDaDVSaGJ4cUplK1ovZ2MxN2NaaEtObWRpd3UKSTJwTHAzUUJmd3ZOK1dibWFqencvN3JZaFkwZDhKWVZUSnpYU0NQV2k2VUFLeEF0WE9MRjhXSUlmOWkzOW42Ugp1S09CR1cxNEZ6ekd5UkppRDNxYUcvSlR2RVcrU0xod2w2OE5kcjVMSFNuYnVnQXFxMzFhYmNReTZabDl2NUE4CkpLQzk3TGovU244cmo3b3BLeTRXM29xN05DUXNBYjB6aDRJbGxSRjZVdlNuSnlTZnNnN3hkWEhIcHhZREh0T1MKWGNPdTV5U1VJWlRnRmU5UmZlVVpsR1o1eG4wY2tNbFE3cVcyV3gxcTBPVld3NXVzNE50a0dxS3JIRzRUbjFYNwp1d28vWXl0bjVzRHhyRHYxL29paTZBWk9Dc1RQcmU0b0Qzd3o0bm1WekNWSmNncnFINFEyNGhUOFdOZz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=",
                "tls.key": "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdmNxRmkvYUFoRFVQRE9IWnhZeUozNmg4Q2U4c2g5WFplL3JXOU1zYW10UWo1ZFNvCkhqK0MxL3BmcFEyZlF0NjIzYWQzeW5qREx1SER2ZUZUK0NCMitaWW0rb0d0S056Y2taL0w2U1BycDhwZGY0OU8KVnN2dEJ2Q0pHOXhLb2hjaGFWV1VjZkVRSmFzUVB5TUs2WEQ4MVdwK2lpVCtIbUkxUUpQM0t6Qkltdi9SOGlhSgpXZWhPVWlnUmdiWjRJMTZObmx5RGYyNFBkbE9wMzFuRmJCSXN1Y2o3YVNrbWRrRTl4M1ZsRHdCQ3JNT0JtMWhFCktwcHlmbUJ5UkJRcGxZcENYR2dSKzNxRFZjY21pNEJZQm5EdUVTMlRTL3lzYlQ5NG9YdkJxYkVXTG9FVldWdU4KWUd3SGhQMnNDbHQwRlpNcDRJa2UveCsvbEV3VkxPbFBlN05NN3dJREFRQUJBb0lCQUZDMXRVRW1ITlVjTTBCSgpNM0Q5S1F6Qis2M0YxbXdWbHgxUU9PVjFFZVZSM2NvNU94MVI2UFNyOXN5Y0ZHUTlqZ3FJMHpwNVRKZTlUcDZMCkdraGtsZlBoMU1Xbks5bzZ3bG56V0tYV3JycDJKbmkrbXBQeXVPUEFtcTRNYW5pdjJYZVArMGJST3dxcHlvanYKQUE3eUM3TStUSDIyNlpKR05WczNFVjkrY3dIbWwweXV6QmZJSm4vcnYvdzJnK1dSS00vTUMwUzdrMmQ4YlJsQQpOeWNLVkdBR0JoS1RsdGpvVllPZWg2YUhFcFNqSzh6ZmFlUGpvNWRZSnZvVklsaTYwWUNnY0pPVS84alhUK05wCjFGbTd0UnZBdGozcFVwMFNxZGFmMlJVemg5amZKcDJWRkNIdVNKNlRQcUFyT3lRb2p0TWNUSEYwVGlXN3hySFAKeE9DUklBRUNnWUVBd0dCUFU3dmR0aE1KQmcrT1JVb0dRUWFJdFRlSnZRd0lxSnZiS0Qyb3NwNGpoUzFkR1pCdwpXMzBHS0VjL2dkOEpOdE9xOUJCbk1pY1BGN2hrdHV5K2JTUHY0MVhQdWQ2N3JTU083VHN3MjBDMTBnRlJxMDZCCnpJSldGQVVxSzNJa3ZWYzNWRG10U0xTRG94NFFaL0JkcWFNbFE1eTVKQ3NDNWtUaG1rWkZsTzhDZ1lFQS9JOVgKWUhpNlJpb01KRTFmcU9ISkw0RERqbGV6bWN1UnJEN2ZFNUluS2J0SloySmhHWU9YL0MwS1huSFRPV1RDRHh4TgpGQnZwdkQ2WHY1bzNQaEI5WjZrMmZxdko0R1M4dXJrRy9LVTR4Y0MrYmFrKzlhdmE4b2FpU3FHMTZ6RDlOSDJQCmpKNjBOcmJMbDFKMHBVOWZpd3VGVlVLSjRoRFpPZk45UnFZZHlBRUNnWUFWd284V2hKaUdnTTZ6ZmN6MDczT1gKcFZxUFRQSHFqVkxwWjMrNXBJZlJkR3ZHSTZSMVFNNUV1dmFZVmI3TVBPTTQ3V1pYNXdjVk9DL1AyZzZpVmxNUAoyMUhHSUMyMzg0YTlCZmFZeE9vNDBxLytTaUhudzZDUTlta3dLSWxsa3Fxdk5BOVJHcGtNTVViMmkyOEZvcjJsCmM0dkNneGE2RFpkdFhuczZUUnFQeHdLQmdDZlk1Y3hPdi9UNkJWaGs3TWJVZU0ySjMxREIvWkF5VWhWL0Jlc3MKa0FsQmgxOU1ZazJJT1o2TDdLcmlBcFYzbERhV0hJTWp0RWtEQnlZdnlxOThJbzBNWVpDeXdmTXBjYTEwSytvSQpsMkI3L0krSXVHcENaeFVFc081ZGZUcFNUR0RQdnFwTkQ5bmlGVlVXcVZpN29UTnE2ZXA5eVF0bDVTQURqcXhxCjRTQUJBb0dBSW0waFVnMXd0Y1M0NmNHTHk2UElrUE01dG9jVFNnaHR6NHZGc3VrL2k0UUE5R0JvQk8yZ0g2dHkKK2tKSG1lYVh0MmRtZ3lTcDBRQVdpdDVVbGNlRXVtQjBOWG5BZEpaUXhlR1NGU3lZa0RXaHdYZDh3RGNlS28vMQpMZkNVNkRrOElOL1NzcHBWVVdYUTJybE9SdnhsckhlQ2lvOG8wa1M5WWl1NTVXTVlnNGc9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg=="
            }`,
        },
    } {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            var logBuf bytes.Buffer
            testLog := log.New(&logBuf, "", 0)
            exited := "exiting via fatal"
            fail = func(format string, v ...interface{}) {
                testLog.Printf(format, v...)
                panic(exited)
            }

            var sawSleep time.Duration
            sleep = func(d time.Duration) { sawSleep = d }

            var sawOutput bytes.Buffer
            out = &sawOutput
            if tt.failOutput {
                out = &errWriter{}
            }

            os.Args = tt.args
            getenv = func(key string) string { return tt.env[key] }
            if tt.wantFail {
                require.PanicsWithValue(t, exited, main)
            } else {
                require.NotPanics(t, main)
            }
            require.Equal(t, tt.wantSleep.String(), sawSleep.String())
            require.Equal(t, tt.wantLog, logBuf.String())
            if tt.wantOutJSON == "" {
                require.Empty(t, sawOutput.String())
            } else {
                require.JSONEq(t, tt.wantOutJSON, sawOutput.String())
            }
        })
    }
}
cmd/pinniped-concierge-kube-cert-agent/testdata/test.crt (new file, vendored, 17 lines)

-----BEGIN CERTIFICATE-----
MIICyDCCAbCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl
cm5ldGVzMB4XDTIwMDcyNTIxMDQxOFoXDTMwMDcyMzIxMDQxOFowFTETMBEGA1UE
AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3K
hYv2gIQ1Dwzh2cWMid+ofAnvLIfV2Xv61vTLGprUI+XUqB4/gtf6X6UNn0Lett2n
d8p4wy7hw73hU/ggdvmWJvqBrSjc3JGfy+kj66fKXX+PTlbL7QbwiRvcSqIXIWlV
lHHxECWrED8jCulw/NVqfook/h5iNUCT9yswSJr/0fImiVnoTlIoEYG2eCNejZ5c
g39uD3ZTqd9ZxWwSLLnI+2kpJnZBPcd1ZQ8AQqzDgZtYRCqacn5gckQUKZWKQlxo
Eft6g1XHJouAWAZw7hEtk0v8rG0/eKF7wamxFi6BFVlbjWBsB4T9rApbdBWTKeCJ
Hv8fv5RMFSzpT3uzTO8CAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB
/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBACh5RhbxqJe+Z/gc17cZhKNmdiwu
I2pLp3QBfwvN+Wbmajzw/7rYhY0d8JYVTJzXSCPWi6UAKxAtXOLF8WIIf9i39n6R
uKOBGW14FzzGyRJiD3qaG/JTvEW+SLhwl68Ndr5LHSnbugAqq31abcQy6Zl9v5A8
JKC97Lj/Sn8rj7opKy4W3oq7NCQsAb0zh4IllRF6UvSnJySfsg7xdXHHpxYDHtOS
XcOu5ySUIZTgFe9RfeUZlGZ5xn0ckMlQ7qW2Wx1q0OVWw5us4NtkGqKrHG4Tn1X7
uwo/Yytn5sDxrDv1/oii6AZOCsTPre4oD3wz4nmVzCVJcgrqH4Q24hT8WNg=
-----END CERTIFICATE-----
cmd/pinniped-concierge-kube-cert-agent/testdata/test.key (new file, vendored, 27 lines)

-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAvcqFi/aAhDUPDOHZxYyJ36h8Ce8sh9XZe/rW9MsamtQj5dSo
Hj+C1/pfpQ2fQt623ad3ynjDLuHDveFT+CB2+ZYm+oGtKNzckZ/L6SPrp8pdf49O
VsvtBvCJG9xKohchaVWUcfEQJasQPyMK6XD81Wp+iiT+HmI1QJP3KzBImv/R8iaJ
WehOUigRgbZ4I16NnlyDf24PdlOp31nFbBIsucj7aSkmdkE9x3VlDwBCrMOBm1hE
KppyfmByRBQplYpCXGgR+3qDVccmi4BYBnDuES2TS/ysbT94oXvBqbEWLoEVWVuN
YGwHhP2sClt0FZMp4Ike/x+/lEwVLOlPe7NM7wIDAQABAoIBAFC1tUEmHNUcM0BJ
M3D9KQzB+63F1mwVlx1QOOV1EeVR3co5Ox1R6PSr9sycFGQ9jgqI0zp5TJe9Tp6L
GkhklfPh1MWnK9o6wlnzWKXWrrp2Jni+mpPyuOPAmq4Maniv2XeP+0bROwqpyojv
AA7yC7M+TH226ZJGNVs3EV9+cwHml0yuzBfIJn/rv/w2g+WRKM/MC0S7k2d8bRlA
NycKVGAGBhKTltjoVYOeh6aHEpSjK8zfaePjo5dYJvoVIli60YCgcJOU/8jXT+Np
1Fm7tRvAtj3pUp0Sqdaf2RUzh9jfJp2VFCHuSJ6TPqArOyQojtMcTHF0TiW7xrHP
xOCRIAECgYEAwGBPU7vdthMJBg+ORUoGQQaItTeJvQwIqJvbKD2osp4jhS1dGZBw
W30GKEc/gd8JNtOq9BBnMicPF7hktuy+bSPv41XPud67rSSO7Tsw20C10gFRq06B
zIJWFAUqK3IkvVc3VDmtSLSDox4QZ/BdqaMlQ5y5JCsC5kThmkZFlO8CgYEA/I9X
YHi6RioMJE1fqOHJL4DDjlezmcuRrD7fE5InKbtJZ2JhGYOX/C0KXnHTOWTCDxxN
FBvpvD6Xv5o3PhB9Z6k2fqvJ4GS8urkG/KU4xcC+bak+9ava8oaiSqG16zD9NH2P
jJ60NrbLl1J0pU9fiwuFVUKJ4hDZOfN9RqYdyAECgYAVwo8WhJiGgM6zfcz073OX
pVqPTPHqjVLpZ3+5pIfRdGvGI6R1QM5EuvaYVb7MPOM47WZX5wcVOC/P2g6iVlMP
21HGIC2384a9BfaYxOo40q/+SiHnw6CQ9mkwKIllkqqvNA9RGpkMMUb2i28For2l
c4vCgxa6DZdtXns6TRqPxwKBgCfY5cxOv/T6BVhk7MbUeM2J31DB/ZAyUhV/Bess
kAlBh19MYk2IOZ6L7KriApV3lDaWHIMjtEkDByYvyq98Io0MYZCywfMpca10K+oI
l2B7/I+IuGpCZxUEsO5dfTpSTGDPvqpND9niFVUWqVi7oTNq6ep9yQtl5SADjqxq
4SABAoGAIm0hUg1wtcS46cGLy6PIkPM5tocTSghtz4vFsuk/i4QA9GBoBO2gH6ty
+kJHmeaXt2dmgySp0QAWit5UlceEumB0NXnAdJZQxeGSFSyYkDWhwXd8wDceKo/1
LfCU6Dk8IN/SsppVUWXQ2rlORvxlrHeCio8o0kS9Yiu55WMYg4g=
-----END RSA PRIVATE KEY-----
Deleted file (35 lines):

// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import (
    "os"
    "time"

    genericapiserver "k8s.io/apiserver/pkg/server"
    "k8s.io/client-go/pkg/version"
    "k8s.io/client-go/rest"
    "k8s.io/component-base/logs"
    "k8s.io/klog/v2"

    "go.pinniped.dev/internal/concierge/server"
)

func main() {
    logs.InitLogs()
    defer logs.FlushLogs()

    // Dump out the time since compile (mostly useful for benchmarking our local development cycle latency).
    var timeSinceCompile time.Duration
    if buildDate, err := time.Parse(time.RFC3339, version.Get().BuildDate); err == nil {
        timeSinceCompile = time.Since(buildDate).Round(time.Second)
    }
    klog.Infof("Running %s at %#v (%s since build)", rest.DefaultKubernetesUserAgent(), version.Get(), timeSinceCompile)

    ctx := genericapiserver.SetupSignalContext()

    if err := server.New(ctx, os.Args[1:], os.Stdout, os.Stderr).Run(); err != nil {
        klog.Fatal(err)
    }
}
cmd/pinniped-server/main.go (new file, 41 lines)

// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package main is the combined entrypoint for all Pinniped server components.
//
// It dispatches to the appropriate Main() entrypoint based the name it is invoked as (os.Args[0]). In our server
// container image, this binary is symlinked to several names such as `/usr/local/bin/pinniped-concierge`.
package main

import (
    "os"
    "path/filepath"

    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/klog/v2"

    concierge "go.pinniped.dev/internal/concierge/server"
    lua "go.pinniped.dev/internal/localuserauthenticator"
    supervisor "go.pinniped.dev/internal/supervisor/server"
)

//nolint: gochecknoglobals // these are swapped during unit tests.
var (
    fail        = klog.Fatalf
    subcommands = map[string]func(){
        "pinniped-concierge":       concierge.Main,
        "pinniped-supervisor":      supervisor.Main,
        "local-user-authenticator": lua.Main,
    }
)

func main() {
    if len(os.Args) == 0 {
        fail("missing os.Args")
    }
    binary := filepath.Base(os.Args[0])
    if subcommands[binary] == nil {
        fail("must be invoked as one of %v, not %q", sets.StringKeySet(subcommands).List(), binary)
    }
    subcommands[binary]()
}
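The combined entrypoint above keys its dispatch entirely off of os.Args[0]. As a hypothetical local demonstration (not part of this commit) of exercising that dispatch through a symlink, something like the sketch below could be used; the "./out/pinniped-server" path is an assumption for illustration only, and passing "--help" simply gives the dispatched component something harmless to do.

// Hypothetical demonstration of argv[0]-based dispatch, assuming a binary was
// already built locally, e.g. via: go build -o out/pinniped-server ./cmd/pinniped-server
package main

import (
    "fmt"
    "log"
    "os"
    "os/exec"
    "path/filepath"
)

func main() {
    target, err := filepath.Abs("./out/pinniped-server") // assumed to exist (see comment above)
    if err != nil {
        log.Fatal(err)
    }

    // Create a symlink named "pinniped-concierge" in a scratch directory.
    dir, err := os.MkdirTemp("", "dispatch-demo")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(dir)
    link := filepath.Join(dir, "pinniped-concierge")
    if err := os.Symlink(target, link); err != nil {
        log.Fatal(err)
    }

    // Running through the symlink makes os.Args[0] end in "pinniped-concierge",
    // so the combined binary dispatches to the Concierge's Main().
    output, runErr := exec.Command(link, "--help").CombinedOutput()
    fmt.Printf("run error: %v\noutput:\n%s\n", runErr, output)
}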
cmd/pinniped-server/main_test.go (new file, 72 lines)

// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import (
    "bytes"
    "log"
    "os"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestEntrypoint(t *testing.T) {
    for _, tt := range []struct {
        name       string
        args       []string
        wantOutput string
        wantFail   bool
        wantArgs   []string
    }{
        {
            name:       "missing args",
            args:       []string{},
            wantOutput: "missing os.Args\n",
            wantFail:   true,
        },
        {
            name:       "invalid subcommand",
            args:       []string{"/path/to/invalid", "some", "args"},
            wantOutput: "must be invoked as one of [another-test-binary valid-test-binary], not \"invalid\"\n",
            wantFail:   true,
        },
        {
            name:     "valid",
            args:     []string{"/path/to/valid-test-binary", "foo", "bar"},
            wantArgs: []string{"/path/to/valid-test-binary", "foo", "bar"},
        },
    } {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            var logBuf bytes.Buffer
            testLog := log.New(&logBuf, "", 0)
            exited := "exiting via fatal"
            fail = func(format string, v ...interface{}) {
                testLog.Printf(format, v...)
                panic(exited)
            }

            // Make a test command that records os.Args when it's invoked.
            var gotArgs []string
            subcommands = map[string]func(){
                "valid-test-binary":   func() { gotArgs = os.Args },
                "another-test-binary": func() {},
            }

            os.Args = tt.args
            if tt.wantFail {
                require.PanicsWithValue(t, exited, main)
            } else {
                require.NotPanics(t, main)
            }
            if tt.wantArgs != nil {
                require.Equal(t, tt.wantArgs, gotArgs)
            }
            if tt.wantOutput != "" {
                require.Equal(t, tt.wantOutput, logBuf.String())
            }
        })
    }
}
@@ -116,6 +116,7 @@ spec:
         scheduler.alpha.kubernetes.io/critical-pod: ""
     spec:
       securityContext:
+        readOnlyRootFilesystem: true
         runAsUser: #@ data.values.run_as_user
         runAsGroup: #@ data.values.run_as_group
       serviceAccountName: #@ defaultResourceName()

@@ -138,10 +139,13 @@ spec:
             limits:
               cpu: "100m"
               memory: "128Mi"
-          args:
+          command:
+            - pinniped-concierge
             - --config=/etc/config/pinniped.yaml
             - --downward-api-path=/etc/podinfo
           volumeMounts:
+            - name: tmp
+              mountPath: /tmp
             - name: config-volume
               mountPath: /etc/config
             - name: podinfo

@@ -167,7 +171,12 @@ spec:
             periodSeconds: 10
             failureThreshold: 3
       volumes:
+        - name: tmp
+          emptyDir:
+            medium: Memory
+            sizeLimit: 100Mi
         - name: config-volume
+          readOnly: true
           configMap:
             name: #@ defaultResourceNameWithSuffix("config")
         - name: impersonation-proxy

@@ -177,6 +186,7 @@ spec:
               - key: token
                 path: token
         - name: podinfo
+          readOnly: true
           downwardAPI:
             items:
               - path: "labels"
@@ -63,8 +63,8 @@ spec:
           image: #@ data.values.image_repo + ":" + data.values.image_tag
           #@ end
           imagePullPolicy: IfNotPresent
-          command: #! override the default entrypoint
-            - /usr/local/bin/local-user-authenticator
+          command:
+            - local-user-authenticator
 ---
 apiVersion: v1
 kind: Service
@@ -65,6 +65,7 @@ spec:
       labels: #@ defaultLabel()
     spec:
       securityContext:
+        readOnlyRootFilesystem: true
         runAsUser: #@ data.values.run_as_user
         runAsGroup: #@ data.values.run_as_group
       serviceAccountName: #@ defaultResourceName()

@@ -80,9 +81,8 @@ spec:
           image: #@ data.values.image_repo + ":" + data.values.image_tag
           #@ end
           imagePullPolicy: IfNotPresent
-          command: #! override the default entrypoint
-            - /usr/local/bin/pinniped-supervisor
-          args:
+          command:
+            - pinniped-supervisor
             - /etc/podinfo
             - /etc/config/pinniped.yaml
           resources:

@@ -131,9 +131,11 @@ spec:
             failureThreshold: 3
       volumes:
         - name: config-volume
+          readOnly: true
           configMap:
             name: #@ defaultResourceNameWithSuffix("static-config")
         - name: podinfo
+          readOnly: true
           downwardAPI:
             items:
               - path: "labels"
@@ -8,6 +8,7 @@ import (
     "context"
     "fmt"
     "io"
+    "os"
     "time"
 
     "github.com/spf13/cobra"

@@ -16,6 +17,10 @@ import (
     "k8s.io/apimachinery/pkg/runtime/serializer"
     genericapiserver "k8s.io/apiserver/pkg/server"
     genericoptions "k8s.io/apiserver/pkg/server/options"
+    "k8s.io/client-go/pkg/version"
+    "k8s.io/client-go/rest"
+    "k8s.io/component-base/logs"
+    "k8s.io/klog/v2"
 
     "go.pinniped.dev/internal/certauthority/dynamiccertauthority"
     "go.pinniped.dev/internal/concierge/apiserver"

@@ -231,3 +236,21 @@ func getAggregatedAPIServerConfig(
     }
     return apiServerConfig, nil
 }
+
+func Main() {
+    logs.InitLogs()
+    defer logs.FlushLogs()
+
+    // Dump out the time since compile (mostly useful for benchmarking our local development cycle latency).
+    var timeSinceCompile time.Duration
+    if buildDate, err := time.Parse(time.RFC3339, version.Get().BuildDate); err == nil {
+        timeSinceCompile = time.Since(buildDate).Round(time.Second)
+    }
+    klog.Infof("Running %s at %#v (%s since build)", rest.DefaultKubernetesUserAgent(), version.Get(), timeSinceCompile)
+
+    ctx := genericapiserver.SetupSignalContext()
+
+    if err := New(ctx, os.Args[1:], os.Stdout, os.Stderr).Run(); err != nil {
+        klog.Fatal(err)
+    }
+}
@@ -8,6 +8,7 @@ package kubecertagent
 import (
     "context"
     "encoding/base64"
+    "encoding/json"
     "fmt"
     "strings"
     "time"

@@ -309,22 +310,30 @@ func (c *agentController) loadSigningKey(agentPod *corev1.Pod) error {
     }
 
     // Exec into the agent pod and cat out the certificate and the key.
-    combinedPEM, err := c.executor.Exec(
-        agentPod.Namespace, agentPod.Name,
-        "sh", "-c", "cat ${CERT_PATH}; echo; echo; cat ${KEY_PATH}",
-    )
+    outputJSON, err := c.executor.Exec(agentPod.Namespace, agentPod.Name, "pinniped-concierge-kube-cert-agent", "print")
     if err != nil {
         return fmt.Errorf("could not exec into agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
     }
 
-    // Split up the output by looking for the block of newlines.
-    var certPEM, keyPEM string
-    if parts := strings.Split(combinedPEM, "\n\n\n"); len(parts) == 2 {
-        certPEM, keyPEM = parts[0], parts[1]
+    // Parse and decode the JSON output from the "pinniped-concierge-kube-cert-agent print" command.
+    var output struct {
+        Cert string `json:"tls.crt"`
+        Key  string `json:"tls.key"`
+    }
+    if err := json.Unmarshal([]byte(outputJSON), &output); err != nil {
+        return fmt.Errorf("failed to decode signing cert/key JSON from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
+    }
+    certPEM, err := base64.StdEncoding.DecodeString(output.Cert)
+    if err != nil {
+        return fmt.Errorf("failed to decode signing cert base64 from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
+    }
+    keyPEM, err := base64.StdEncoding.DecodeString(output.Key)
+    if err != nil {
+        return fmt.Errorf("failed to decode signing key base64 from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
     }
 
     // Load the certificate and key into the dynamic signer.
-    if err := c.dynamicCertProvider.SetCertKeyContent([]byte(certPEM), []byte(keyPEM)); err != nil {
+    if err := c.dynamicCertProvider.SetCertKeyContent(certPEM, keyPEM); err != nil {
         return fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
     }
 

@@ -461,7 +470,7 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *
             Name:            "sleeper",
             Image:           c.cfg.ContainerImage,
             ImagePullPolicy: corev1.PullIfNotPresent,
-            Command:         []string{"/bin/sleep", "infinity"},
+            Command:         []string{"pinniped-concierge-kube-cert-agent", "sleep"},
             VolumeMounts:    volumeMounts,
             Env: []corev1.EnvVar{
                 {Name: "CERT_PATH", Value: getContainerArgByName(controllerManagerPod, "cluster-signing-cert-file", "/etc/kubernetes/ca/ca.pem")},
@@ -104,7 +104,7 @@ func TestAgentController(t *testing.T) {
         Containers: []corev1.Container{{
             Name:  "sleeper",
             Image: "pinniped-server-image",
-            Command: []string{"/bin/sleep", "infinity"},
+            Command: []string{"pinniped-concierge-kube-cert-agent", "sleep"},
             Env: []corev1.EnvVar{
                 {Name: "CERT_PATH", Value: "/path/to/signing.crt"},
                 {Name: "KEY_PATH", Value: "/path/to/signing.key"},

@@ -200,8 +200,8 @@ func TestAgentController(t *testing.T) {
     }
 
     mockExecSucceeds := func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-        executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sh", "-c", "cat ${CERT_PATH}; echo; echo; cat ${KEY_PATH}").
-            Return("test-cert\n\n\ntest-key", nil)
+        executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+            Return(`{"tls.crt": "dGVzdC1jZXJ0", "tls.key": "dGVzdC1rZXk="}`, nil) // "test-cert" / "test-key"
         dynamicCert.SetCertKeyContent([]byte("test-cert"), []byte("test-key")).
             Return(nil)
     }

@@ -573,7 +573,7 @@ func TestAgentController(t *testing.T) {
                 validClusterInfoConfigMap,
             },
             mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-                executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sh", "-c", "cat ${CERT_PATH}; echo; echo; cat ${KEY_PATH}").
+                executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
                     Return("", fmt.Errorf("some exec error")).
                     AnyTimes()
             },

@@ -589,6 +589,90 @@ func TestAgentController(t *testing.T) {
                 LastUpdateTime: metav1.NewTime(now),
             },
         },
+        {
+            name: "deployment exists, configmap is valid, exec into agent pod returns invalid JSON",
+            pinnipedObjects: []runtime.Object{
+                initialCredentialIssuer,
+            },
+            kubeObjects: []runtime.Object{
+                healthyKubeControllerManagerPod,
+                healthyAgentDeployment,
+                healthyAgentPod,
+                validClusterInfoConfigMap,
+            },
+            mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
+                executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+                    Return("bogus-data", nil).
+                    AnyTimes()
+            },
+            wantDistinctErrors: []string{
+                `failed to decode signing cert/key JSON from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: invalid character 'b' looking for beginning of value`,
+            },
+            wantAgentDeployment: healthyAgentDeployment,
+            wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
+                Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+                Status:         configv1alpha1.ErrorStrategyStatus,
+                Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
+                Message:        `failed to decode signing cert/key JSON from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: invalid character 'b' looking for beginning of value`,
+                LastUpdateTime: metav1.NewTime(now),
+            },
+        },
+        {
+            name: "deployment exists, configmap is valid, exec into agent pod returns invalid cert base64",
+            pinnipedObjects: []runtime.Object{
+                initialCredentialIssuer,
+            },
+            kubeObjects: []runtime.Object{
+                healthyKubeControllerManagerPod,
+                healthyAgentDeployment,
+                healthyAgentPod,
+                validClusterInfoConfigMap,
+            },
+            mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
+                executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+                    Return(`{"tls.crt": "invalid"}`, nil).
+                    AnyTimes()
+            },
+            wantDistinctErrors: []string{
+                `failed to decode signing cert base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
+            },
+            wantAgentDeployment: healthyAgentDeployment,
+            wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
+                Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+                Status:         configv1alpha1.ErrorStrategyStatus,
+                Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
+                Message:        `failed to decode signing cert base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
+                LastUpdateTime: metav1.NewTime(now),
+            },
+        },
+        {
+            name: "deployment exists, configmap is valid, exec into agent pod returns invalid key base64",
+            pinnipedObjects: []runtime.Object{
+                initialCredentialIssuer,
+            },
+            kubeObjects: []runtime.Object{
+                healthyKubeControllerManagerPod,
+                healthyAgentDeployment,
+                healthyAgentPod,
+                validClusterInfoConfigMap,
+            },
+            mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
+                executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+                    Return(`{"tls.crt": "dGVzdAo=", "tls.key": "invalid"}`, nil).
+                    AnyTimes()
+            },
+            wantDistinctErrors: []string{
+                `failed to decode signing key base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
+            },
+            wantAgentDeployment: healthyAgentDeployment,
+            wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
+                Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+                Status:         configv1alpha1.ErrorStrategyStatus,
+                Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
+                Message:        `failed to decode signing key base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
+                LastUpdateTime: metav1.NewTime(now),
+            },
+        },
         {
             name: "deployment exists, configmap is valid, exec into agent pod returns bogus certs",
             pinnipedObjects: []runtime.Object{

@@ -601,10 +685,10 @@ func TestAgentController(t *testing.T) {
                 validClusterInfoConfigMap,
             },
             mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-                executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sh", "-c", "cat ${CERT_PATH}; echo; echo; cat ${KEY_PATH}").
-                    Return("bogus-data", nil).
+                executor.Exec("concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+                    Return(`{"tls.crt": "dGVzdC1jZXJ0", "tls.key": "dGVzdC1rZXk="}`, nil). // "test-cert" / "test-key"
                     AnyTimes()
-                dynamicCert.SetCertKeyContent([]byte(""), []byte("")).
+                dynamicCert.SetCertKeyContent([]byte("test-cert"), []byte("test-key")).
                     Return(fmt.Errorf("some dynamic cert error")).
                     AnyTimes()
             },
@@ -1,13 +1,13 @@
 // Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 
-// Package main provides a authentication webhook program.
+// Package localuserauthenticator provides a authentication webhook program.
 //
 // This webhook is meant to be used in demo settings to play around with
 // Pinniped. As well, it can come in handy in integration tests.
 //
 // This webhook is NOT meant for use in production systems.
-package main
+package localuserauthenticator
 
 import (
     "bytes"

@@ -378,7 +378,7 @@ func run() error {
     return nil
 }
 
-func main() {
+func Main() {
     // Hardcode the logging level to debug, since this is a test app and it is very helpful to have
     // verbose logs to debug test failures.
     if err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug); err != nil {
@@ -1,7 +1,7 @@
 // Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 
-package main
+package localuserauthenticator
 
 import (
     "bytes"
@@ -1,7 +1,8 @@
 // Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 
-package main
+// Package server defines the entrypoint for the Pinniped Supervisor server.
+package server
 
 import (
     "context"

@@ -371,7 +372,7 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
     return nil
 }
 
-func main() {
+func Main() {
     logs.InitLogs()
     defer logs.FlushLogs()
     plog.RemoveKlogGlobalFlags() // move this whenever the below code gets refactored to use cobra
@@ -40,6 +40,7 @@ import (
     rbacv1 "k8s.io/api/rbac/v1"
     "k8s.io/apimachinery/pkg/api/equality"
     k8serrors "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme"
     "k8s.io/apimachinery/pkg/labels"

@@ -306,7 +307,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 
     // Get pods in concierge namespace and pick one.
     // this is for tests that require performing actions against a running pod. We use the concierge pod because we already have it handy.
-    // We want to make sure it's a concierge pod (not cert agent), because we need to be able to "exec echo" and port-forward a running port.
+    // We want to make sure it's a concierge pod (not cert agent), because we need to be able to port-forward a running port.
     pods, err := adminClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{})
     require.NoError(t, err)
     require.Greater(t, len(pods.Items), 0)

@@ -989,48 +990,53 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
     parallelIfNotEKS(t)
     kubeconfigPath, envVarsWithProxy, tempDir := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)
 
+    // Run a new test pod so we can interact with it using kubectl. We use a fresh pod here rather than the
+    // existing Concierge pod because we need more tools than we can get from a scratch/distroless base image.
+    runningTestPod := testlib.CreatePod(ctx, t, "impersonation-proxy", env.ConciergeNamespace, corev1.PodSpec{Containers: []corev1.Container{{
+        Name:            "impersonation-proxy-test",
+        Image:           "debian:10.10-slim",
+        ImagePullPolicy: corev1.PullIfNotPresent,
+        Command:         []string{"bash", "-c", `while true; do read VAR; echo "VAR: $VAR"; done`},
+        Stdin:           true,
+        Resources: corev1.ResourceRequirements{
+            Limits: corev1.ResourceList{
+                corev1.ResourceMemory: resource.MustParse("16Mi"),
+                corev1.ResourceCPU:    resource.MustParse("10m"),
+            },
+            Requests: corev1.ResourceList{
+                corev1.ResourceMemory: resource.MustParse("16Mi"),
+                corev1.ResourceCPU:    resource.MustParse("10m"),
+            },
+        },
+    }}})
+
     // Try "kubectl exec" through the impersonation proxy.
     echoString := "hello world"
     remoteEchoFile := fmt.Sprintf("/tmp/test-impersonation-proxy-echo-file-%d.txt", time.Now().Unix())
-    stdout, err := runKubectl(t, kubeconfigPath, envVarsWithProxy, "exec", "--namespace", env.ConciergeNamespace, conciergePod.Name, "--", "bash", "-c", fmt.Sprintf(`echo "%s" | tee %s`, echoString, remoteEchoFile))
+    stdout, err := runKubectl(t, kubeconfigPath, envVarsWithProxy, "exec", "--namespace", runningTestPod.Namespace, runningTestPod.Name, "--", "bash", "-c", fmt.Sprintf(`echo "%s" | tee %s`, echoString, remoteEchoFile))
     require.NoError(t, err, `"kubectl exec" failed`)
     require.Equal(t, echoString+"\n", stdout)
 
     // run the kubectl cp command
     localEchoFile := filepath.Join(tempDir, filepath.Base(remoteEchoFile))
-    _, err = runKubectl(t, kubeconfigPath, envVarsWithProxy, "cp", fmt.Sprintf("%s/%s:%s", env.ConciergeNamespace, conciergePod.Name, remoteEchoFile), localEchoFile)
+    _, err = runKubectl(t, kubeconfigPath, envVarsWithProxy, "cp", fmt.Sprintf("%s/%s:%s", runningTestPod.Namespace, runningTestPod.Name, remoteEchoFile), localEchoFile)
     require.NoError(t, err, `"kubectl cp" failed`)
     localEchoFileData, err := ioutil.ReadFile(localEchoFile)
     require.NoError(t, err)
     require.Equal(t, echoString+"\n", string(localEchoFileData))
-    defer func() {
-        _, _ = runKubectl(t, kubeconfigPath, envVarsWithProxy, "exec", "--namespace", env.ConciergeNamespace, conciergePod.Name, "--", "rm", remoteEchoFile) // cleanup remote echo file
-    }()
 
     // run the kubectl logs command
     logLinesCount := 10
-    stdout, err = runKubectl(t, kubeconfigPath, envVarsWithProxy, "logs", "--namespace", env.ConciergeNamespace, conciergePod.Name, fmt.Sprintf("--tail=%d", logLinesCount))
+    stdout, err = runKubectl(t, kubeconfigPath, envVarsWithProxy, "logs", "--namespace", conciergePod.Namespace, conciergePod.Name, fmt.Sprintf("--tail=%d", logLinesCount))
     require.NoError(t, err, `"kubectl logs" failed`)
     // Expect _approximately_ logLinesCount lines in the output
     // (we can't match 100% exactly due to https://github.com/kubernetes/kubernetes/issues/72628).
     require.InDeltaf(t, logLinesCount, strings.Count(stdout, "\n"), 1, "wanted %d newlines in kubectl logs output:\n%s", logLinesCount, stdout)
 
     // run the kubectl attach command
-    namespaceName := createTestNamespace(t, adminClient)
-    attachPod := testlib.CreatePod(ctx, t, "impersonation-proxy-attach", namespaceName, corev1.PodSpec{
-        Containers: []corev1.Container{
-            {
-                Name:    "impersonation-proxy-attach",
-                Image:   conciergePod.Spec.Containers[0].Image,
-                Command: []string{"bash"},
-                Args:    []string{"-c", `while true; do read VAR; echo "VAR: $VAR"; done`},
-                Stdin:   true,
-            },
-        },
-    })
     timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
     defer cancelFunc()
-    attachCmd, attachStdout, attachStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "attach", "--stdin=true", "--namespace", namespaceName, attachPod.Name, "-v=10")
+    attachCmd, attachStdout, attachStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "attach", "--stdin=true", "--namespace", runningTestPod.Namespace, runningTestPod.Name, "-v=10")
     attachCmd.Env = envVarsWithProxy
     attachStdin, err := attachCmd.StdinPipe()
     require.NoError(t, err)
@@ -478,7 +478,8 @@ func CreatePod(ctx context.Context, t *testing.T, name, namespace string, spec c
     client := NewKubernetesClientset(t)
     pods := client.CoreV1().Pods(namespace)
 
-    ctx, cancel := context.WithTimeout(ctx, time.Minute)
+    const podCreateTimeout = 2 * time.Minute
+    ctx, cancel := context.WithTimeout(ctx, podCreateTimeout+time.Second)
     defer cancel()
 
     created, err := pods.Create(ctx, &corev1.Pod{ObjectMeta: testObjectMeta(t, name), Spec: spec}, metav1.CreateOptions{})

@@ -497,7 +498,7 @@ func CreatePod(ctx context.Context, t *testing.T, name, namespace string, spec c
         result, err = pods.Get(ctx, created.Name, metav1.GetOptions{})
         requireEventually.NoError(err)
         requireEventually.Equal(corev1.PodRunning, result.Status.Phase)
-    }, 15*time.Second, 1*time.Second, "expected the Pod to go into phase %s", corev1.PodRunning)
+    }, podCreateTimeout, 1*time.Second, "expected the Pod to go into phase %s", corev1.PodRunning)
     return result
 }