Switch to go.uber.org/zap for JSON formatted logging
Signed-off-by: Monis Khan <mok@vmware.com>
parent 03ccef03fe
commit 0674215ef3
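The commit replaces klog's printf-style text output with structured JSON logging built on go.uber.org/zap (wrapped by Pinniped's internal plog package). As a rough, generic illustration of what that buys — not plog's exact encoder configuration — a stock zap production logger emits one JSON object per entry, with each key/value pair as its own field (message and field names below are taken from the expected test logs in this diff; the output line is approximate):

```go
package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewProduction() // JSON encoder, timestamps, caller info
	if err != nil {
		panic(err)
	}
	defer logger.Sync() //nolint:errcheck // best-effort flush on exit

	// Prints something like:
	// {"level":"info","ts":1651512345.1,"caller":"example/main.go:14","msg":"Performing OIDC login","issuer":"test-issuer","client id":"test-client-id"}
	logger.Info("Performing OIDC login",
		zap.String("issuer", "test-issuer"),
		zap.String("client id", "test-client-id"),
	)
}
```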
@@ -8,22 +8,23 @@
 package main
 
 import (
+	"fmt"
 	"os"
 	"path/filepath"
 
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/klog/v2"
 
-	concierge "go.pinniped.dev/internal/concierge/server"
 	// this side effect import ensures that we use fipsonly crypto in fips_strict mode.
+	concierge "go.pinniped.dev/internal/concierge/server"
 	_ "go.pinniped.dev/internal/crypto/ptls"
 	lua "go.pinniped.dev/internal/localuserauthenticator"
+	"go.pinniped.dev/internal/plog"
 	supervisor "go.pinniped.dev/internal/supervisor/server"
 )
 
-//nolint: gochecknoglobals // these are swapped during unit tests.
+// nolint: gochecknoglobals // these are swapped during unit tests.
 var (
-	fail = klog.Fatalf
+	fail = plog.Fatal
 	subcommands = map[string]func(){
 		"pinniped-concierge":  concierge.Main,
 		"pinniped-supervisor": supervisor.Main,
@@ -33,11 +34,11 @@ var (
 
 func main() {
 	if len(os.Args) == 0 {
-		fail("missing os.Args")
+		fail(fmt.Errorf("missing os.Args"))
 	}
 	binary := filepath.Base(os.Args[0])
 	if subcommands[binary] == nil {
-		fail("must be invoked as one of %v, not %q", sets.StringKeySet(subcommands).List(), binary)
+		fail(fmt.Errorf("must be invoked as one of %v, not %q", sets.StringKeySet(subcommands).List(), binary))
 	}
 	subcommands[binary]()
 }
@@ -43,8 +43,11 @@ func TestEntrypoint(t *testing.T) {
 	var logBuf bytes.Buffer
 	testLog := log.New(&logBuf, "", 0)
 	exited := "exiting via fatal"
-	fail = func(format string, v ...interface{}) {
-		testLog.Printf(format, v...)
+	fail = func(err error, keysAndValues ...interface{}) {
+		testLog.Print(err)
+		if len(keysAndValues) > 0 {
+			testLog.Print(keysAndValues...)
+		}
 		panic(exited)
 	}
 
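The package-level `fail` variable is what keeps `main` testable here: production wires it to `plog.Fatal`, while the test above swaps in a recorder that panics instead of exiting the process. A stripped-down sketch of the same seam (names are illustrative, not the exact Pinniped code):

```go
package main

import (
	"errors"
	"fmt"
)

// fatalFunc mirrors the plog.Fatal-style signature: an error plus optional key/value pairs.
type fatalFunc func(err error, keysAndValues ...interface{})

// In production this would be plog.Fatal; tests swap it for a recorder.
var fail fatalFunc = func(err error, keysAndValues ...interface{}) {
	panic(fmt.Sprintf("fatal: %v %v", err, keysAndValues))
}

func run(args []string) {
	if len(args) == 0 {
		fail(errors.New("missing os.Args"))
	}
}

func main() {
	// A test would do: fail = func(err error, _ ...interface{}) { recorded = err }
	// and then call run(nil) without the process exiting.
	run([]string{"pinniped-concierge"})
}
```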
@@ -11,7 +11,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"net/http"
 	"os"
 	"strconv"
@@ -19,8 +18,6 @@ import (
 	"time"
 
 	"github.com/coreos/go-oidc/v3/oidc"
-	"github.com/go-logr/logr"
-	"github.com/go-logr/stdr"
 	"github.com/spf13/cobra"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
@@ -34,23 +31,24 @@ import (
 	conciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
 	"go.pinniped.dev/internal/groupsuffix"
 	"go.pinniped.dev/internal/net/phttp"
+	"go.pinniped.dev/internal/plog"
 )
 
 type kubeconfigDeps struct {
 	getPathToSelf func() (string, error)
 	getClientset  getConciergeClientsetFunc
-	log           logr.Logger
+	log           plog.MinLogger
 }
 
 func kubeconfigRealDeps() kubeconfigDeps {
 	return kubeconfigDeps{
 		getPathToSelf: os.Executable,
 		getClientset:  getRealConciergeClientset,
-		log:           stdr.New(log.New(os.Stderr, "", 0)),
+		log:           plog.New(),
 	}
 }
 
-//nolint: gochecknoinits
+// nolint: gochecknoinits
 func init() {
 	getCmd.AddCommand(kubeconfigCommand(kubeconfigRealDeps()))
 }
@@ -175,6 +173,11 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
 	ctx, cancel := context.WithTimeout(ctx, flags.timeout)
 	defer cancel()
 
+	// the log statements in this file assume that Info logs are unconditionally printed so we set the global level to info
+	if err := plog.ValidateAndSetLogLevelAndFormatGlobally(ctx, plog.LogSpec{Level: plog.LevelInfo, Format: plog.FormatCLI}); err != nil {
+		return err
+	}
+
 	// Validate api group suffix and immediately return an error if it is invalid.
 	if err := groupsuffix.Validate(flags.concierge.apiGroupSuffix); err != nil {
 		return fmt.Errorf("invalid API group suffix: %w", err)
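The `plog.FormatCLI` setting above suggests the CLI keeps a human-readable encoder while the servers emit JSON. As a rough illustration of that split in zap itself (not Pinniped's actual plog internals — the `format` parameter and `newLogger` helper below are hypothetical stand-ins for whatever `plog.LogSpec.Format` resolves to):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// newLogger illustrates selecting a "cli" (console) encoding or the default
// JSON encoding on top of the same zap configuration.
func newLogger(format string) (*zap.Logger, error) {
	cfg := zap.NewProductionConfig() // JSON encoding by default
	if format == "cli" {
		cfg.Encoding = "console" // human-readable, space-separated output
		cfg.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder
	}
	return cfg.Build()
}

func main() {
	logger, err := newLogger("json")
	if err != nil {
		panic(err)
	}
	defer logger.Sync() //nolint:errcheck // best-effort flush on exit
	logger.Info("validated connection to the cluster", zap.Int("attempts", 1))
}
```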
@@ -398,7 +401,7 @@ func waitForCredentialIssuer(ctx context.Context, clientset conciergeclientset.I
 	return credentialIssuer, nil
 }
 
-func discoverConciergeParams(credentialIssuer *configv1alpha1.CredentialIssuer, flags *getKubeconfigParams, v1Cluster *clientcmdapi.Cluster, log logr.Logger) error {
+func discoverConciergeParams(credentialIssuer *configv1alpha1.CredentialIssuer, flags *getKubeconfigParams, v1Cluster *clientcmdapi.Cluster, log plog.MinLogger) error {
 	// Autodiscover the --concierge-mode.
 	frontend, err := getConciergeFrontend(credentialIssuer, flags.concierge.mode)
 	if err != nil {
@@ -446,7 +449,7 @@ func discoverConciergeParams(credentialIssuer *configv1alpha1.CredentialIssuer,
 	return nil
 }
 
-func logStrategies(credentialIssuer *configv1alpha1.CredentialIssuer, log logr.Logger) {
+func logStrategies(credentialIssuer *configv1alpha1.CredentialIssuer, log plog.MinLogger) {
 	for _, strategy := range credentialIssuer.Status.Strategies {
 		log.Info("found CredentialIssuer strategy",
 			"type", strategy.Type,
@@ -457,7 +460,7 @@ func logStrategies(credentialIssuer *configv1alpha1.CredentialIssuer, log logr.L
 	}
 }
 
-func discoverAuthenticatorParams(authenticator metav1.Object, flags *getKubeconfigParams, log logr.Logger) error {
+func discoverAuthenticatorParams(authenticator metav1.Object, flags *getKubeconfigParams, log plog.MinLogger) error {
 	switch auth := authenticator.(type) {
 	case *conciergev1alpha1.WebhookAuthenticator:
 		// If the --concierge-authenticator-type/--concierge-authenticator-name flags were not set explicitly, set
@@ -556,7 +559,7 @@ func newExecKubeconfig(cluster *clientcmdapi.Cluster, execConfig *clientcmdapi.E
 	}
 }
 
-func lookupCredentialIssuer(clientset conciergeclientset.Interface, name string, log logr.Logger) (*configv1alpha1.CredentialIssuer, error) {
+func lookupCredentialIssuer(clientset conciergeclientset.Interface, name string, log plog.MinLogger) (*configv1alpha1.CredentialIssuer, error) {
 	ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
 	defer cancelFunc()
 
@@ -582,7 +585,7 @@ func lookupCredentialIssuer(clientset conciergeclientset.Interface, name string,
 	return result, nil
 }
 
-func lookupAuthenticator(clientset conciergeclientset.Interface, authType, authName string, log logr.Logger) (metav1.Object, error) {
+func lookupAuthenticator(clientset conciergeclientset.Interface, authType, authName string, log plog.MinLogger) (metav1.Object, error) {
 	ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
 	defer cancelFunc()
 
@@ -643,7 +646,7 @@ func writeConfigAsYAML(out io.Writer, config clientcmdapi.Config) error {
 	return nil
 }
 
-func validateKubeconfig(ctx context.Context, flags getKubeconfigParams, kubeconfig clientcmdapi.Config, log logr.Logger) error {
+func validateKubeconfig(ctx context.Context, flags getKubeconfigParams, kubeconfig clientcmdapi.Config, log plog.MinLogger) error {
 	if flags.skipValidate {
 		return nil
 	}
@@ -706,7 +709,7 @@ func validateKubeconfig(ctx context.Context, flags getKubeconfigParams, kubeconf
 				log.Info("validated connection to the cluster", "attempts", attempts)
 				return nil
 			}
-			log.Error(err, "could not connect to cluster, retrying...", "attempts", attempts, "remaining", time.Until(deadline).Round(time.Second).String())
+			log.Info("could not connect to cluster, retrying...", "error", err, "attempts", attempts, "remaining", time.Until(deadline).Round(time.Second).String())
 		}
 	}
 }
@@ -726,7 +729,7 @@ func hasPendingStrategy(credentialIssuer *configv1alpha1.CredentialIssuer) bool
 	return false
 }
 
-func discoverSupervisorUpstreamIDP(ctx context.Context, flags *getKubeconfigParams, log logr.Logger) error {
+func discoverSupervisorUpstreamIDP(ctx context.Context, flags *getKubeconfigParams, log plog.MinLogger) error {
 	httpClient, err := newDiscoveryHTTPClient(flags.oidc.caBundle)
 	if err != nil {
 		return err
@@ -898,7 +901,7 @@ func selectUpstreamIDPNameAndType(pinnipedIDPs []idpdiscoveryv1alpha1.PinnipedID
 	}
 }
 
-func selectUpstreamIDPFlow(discoveredIDPFlows []idpdiscoveryv1alpha1.IDPFlow, selectedIDPName string, selectedIDPType idpdiscoveryv1alpha1.IDPType, specifiedFlow string, log logr.Logger) (idpdiscoveryv1alpha1.IDPFlow, error) {
+func selectUpstreamIDPFlow(discoveredIDPFlows []idpdiscoveryv1alpha1.IDPFlow, selectedIDPName string, selectedIDPType idpdiscoveryv1alpha1.IDPType, specifiedFlow string, log plog.MinLogger) (idpdiscoveryv1alpha1.IDPFlow, error) {
 	switch {
 	case len(discoveredIDPFlows) == 0:
 		// No flows listed by discovery means that we are talking to an old Supervisor from before this feature existed.
@@ -2889,7 +2889,7 @@ func TestGetKubeconfig(t *testing.T) {
 			})
 			issuerEndpointPtr = &issuerEndpoint
 
-			testLog := testlogger.NewLegacy(t) //nolint: staticcheck // old test with lots of log statements
+			testLog := testlogger.NewLegacy(t) // nolint: staticcheck // old test with lots of log statements
 			cmd := kubeconfigCommand(kubeconfigDeps{
 				getPathToSelf: func() (string, error) {
 					if tt.getPathToSelfErr != nil {
@@ -20,7 +20,6 @@ import (
 	"github.com/spf13/cobra"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
-	"k8s.io/klog/v2/klogr"
 
 	idpdiscoveryv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/idpdiscovery/v1alpha1"
 	"go.pinniped.dev/internal/execcredcache"
@@ -33,7 +32,7 @@ import (
 	"go.pinniped.dev/pkg/oidcclient/oidctypes"
 )
 
-//nolint: gochecknoinits
+// nolint: gochecknoinits
 func init() {
 	loginCmd.AddCommand(oidcLoginCommand(oidcLoginCommandRealDeps()))
 }
@@ -125,7 +124,7 @@ func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command {
 }
 
 func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error { //nolint:funlen
-	pLogger, err := SetLogLevel(deps.lookupEnv)
+	pLogger, err := SetLogLevel(cmd.Context(), deps.lookupEnv)
 	if err != nil {
 		plog.WarningErr("Received error while setting log level", err)
 	}
@@ -133,11 +132,11 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
 	// Initialize the session cache.
 	var sessionOptions []filesession.Option
 
-	// If the hidden --debug-session-cache option is passed, log all the errors from the session cache with klog.
+	// If the hidden --debug-session-cache option is passed, log all the errors from the session cache.
 	if flags.debugSessionCache {
-		logger := klogr.New().WithName("session")
+		logger := plog.WithName("session")
 		sessionOptions = append(sessionOptions, filesession.WithErrorReporter(func(err error) {
-			logger.Error(err, "error during session cache operation")
+			logger.Error("error during session cache operation", err)
 		}))
 	}
 	sessionCache := filesession.New(flags.sessionCachePath, sessionOptions...)
@@ -145,7 +144,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
 	// Initialize the login handler.
 	opts := []oidcclient.Option{
 		oidcclient.WithContext(cmd.Context()),
-		oidcclient.WithLogger(klogr.New()),
+		oidcclient.WithLogger(plog.Logr()), // nolint: staticcheck // old code with lots of log statements
 		oidcclient.WithScopes(flags.scopes),
 		oidcclient.WithSessionCache(sessionCache),
 	}
@@ -326,15 +325,15 @@ func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential {
 	return &cred
 }
 
-func SetLogLevel(lookupEnv func(string) (string, bool)) (plog.Logger, error) {
+func SetLogLevel(ctx context.Context, lookupEnv func(string) (string, bool)) (plog.Logger, error) {
 	debug, _ := lookupEnv("PINNIPED_DEBUG")
 	if debug == "true" {
-		err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug)
+		err := plog.ValidateAndSetLogLevelAndFormatGlobally(ctx, plog.LogSpec{Level: plog.LevelDebug, Format: plog.FormatCLI})
 		if err != nil {
 			return nil, err
 		}
 	}
-	logger := plog.New("Pinniped login: ")
+	logger := plog.New().WithName("pinniped-login")
 	return logger, nil
 }
 
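oidcclient still accepts a logr.Logger, so `plog.Logr()` presumably hands back a logr adapter over the zap core; the go-logr/zapr module added to go.mod later in this diff is the standard bridge for that. A minimal sketch of the bridging pattern itself, not plog's code (the `consumer` function stands in for an API like `oidcclient.WithLogger`):

```go
package main

import (
	"github.com/go-logr/logr"
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
)

// consumer stands in for an API that still expects a logr.Logger
// rather than a *zap.Logger.
func consumer(log logr.Logger) {
	log.Info("Performing OIDC login", "issuer", "test-issuer")
}

func main() {
	zapLogger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer zapLogger.Sync() //nolint:errcheck // best-effort flush on exit

	// zapr wraps the zap logger in the logr interface, so old logr-based call
	// sites and new zap-based ones write through the same JSON core.
	consumer(zapr.NewLogger(zapLogger).WithName("pinniped-login"))
}
```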
@@ -10,18 +10,20 @@ import (
 	"fmt"
 	"io/ioutil"
 	"path/filepath"
+	"strings"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
-	"k8s.io/klog/v2"
+	clocktesting "k8s.io/utils/clock/testing"
 
 	"go.pinniped.dev/internal/certauthority"
 	"go.pinniped.dev/internal/here"
+	"go.pinniped.dev/internal/plog"
 	"go.pinniped.dev/internal/testutil"
-	"go.pinniped.dev/internal/testutil/testlogger"
 	"go.pinniped.dev/pkg/conciergeclient"
 	"go.pinniped.dev/pkg/oidcclient"
 	"go.pinniped.dev/pkg/oidcclient/oidctypes"
@@ -38,6 +40,10 @@ func TestLoginOIDCCommand(t *testing.T) {
 
 	time1 := time.Date(3020, 10, 12, 13, 14, 15, 16, time.UTC)
 
+	now, err := time.Parse(time.RFC3339Nano, "2028-10-11T23:37:26.953313745Z")
+	require.NoError(t, err)
+	nowStr := now.Local().Format(time.RFC1123)
+
 	tests := []struct {
 		name string
 		args []string
@@ -342,8 +348,8 @@ func TestLoginOIDCCommand(t *testing.T) {
 			wantOptionsCount: 4,
 			wantStdout:       `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{"interactive":false},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
 			wantLogs: []string{
-				"\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"",
-				"\"level\"=0 \"msg\"=\"Pinniped login: No concierge configured, skipping token credential exchange\"",
+				nowStr + ` pinniped-login cmd/login_oidc.go:222 Performing OIDC login {"issuer": "test-issuer", "client id": "test-client-id"}`,
+				nowStr + ` pinniped-login cmd/login_oidc.go:242 No concierge configured, skipping token credential exchange`,
 			},
 		},
 		{
@@ -372,18 +378,20 @@ func TestLoginOIDCCommand(t *testing.T) {
 			wantOptionsCount: 11,
 			wantStdout:       `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{"interactive":false},"status":{"token":"exchanged-token"}}` + "\n",
 			wantLogs: []string{
-				"\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"",
-				"\"level\"=0 \"msg\"=\"Pinniped login: Exchanging token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1:1234/\"",
-				"\"level\"=0 \"msg\"=\"Pinniped login: Successfully exchanged token for cluster credential.\"",
-				"\"level\"=0 \"msg\"=\"Pinniped login: caching cluster credential for future use.\"",
+				nowStr + ` pinniped-login cmd/login_oidc.go:222 Performing OIDC login {"issuer": "test-issuer", "client id": "test-client-id"}`,
+				nowStr + ` pinniped-login cmd/login_oidc.go:232 Exchanging token for cluster credential {"endpoint": "https://127.0.0.1:1234/", "authenticator type": "webhook", "authenticator name": "test-authenticator"}`,
+				nowStr + ` pinniped-login cmd/login_oidc.go:240 Successfully exchanged token for cluster credential.`,
+				nowStr + ` pinniped-login cmd/login_oidc.go:247 caching cluster credential for future use.`,
 			},
 		},
 	}
 	for _, tt := range tests {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			testLogger := testlogger.NewLegacy(t) //nolint: staticcheck // old test with lots of log statements
-			klog.SetLogger(testLogger.Logger)
+			var buf bytes.Buffer
+			fakeClock := clocktesting.NewFakeClock(now)
+			ctx := plog.TestZapOverrides(context.Background(), t, &buf, nil, zap.WithClock(plog.ZapClock(fakeClock)))
+
 			var (
 				gotOptions []oidcclient.Option
 			)
@@ -428,7 +436,7 @@ func TestLoginOIDCCommand(t *testing.T) {
 			cmd.SetOut(&stdout)
 			cmd.SetErr(&stderr)
 			cmd.SetArgs(tt.args)
-			err := cmd.Execute()
+			err = cmd.ExecuteContext(ctx)
 			if tt.wantError {
 				require.Error(t, err)
 			} else {
@@ -438,7 +446,15 @@ func TestLoginOIDCCommand(t *testing.T) {
 			require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
 			require.Len(t, gotOptions, tt.wantOptionsCount)
 
-			require.Equal(t, tt.wantLogs, testLogger.Lines())
+			require.Equal(t, tt.wantLogs, logLines(buf.String()))
 		})
 	}
 }
+
+func logLines(logs string) []string {
+	if len(logs) == 0 {
+		return nil
+	}
+
+	return strings.Split(strings.TrimSpace(logs), "\n")
+}
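These tests pin log timestamps by injecting a fake clock through `zap.WithClock`, which is why the expected lines can be built from a fixed `nowStr`. A sketch of the underlying zap mechanism with a hand-rolled `zapcore.Clock` — the `fixedClock` type here is illustrative; Pinniped adapts a Kubernetes fake clock via `plog.ZapClock`:

```go
package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// fixedClock implements zapcore.Clock and always reports the same instant,
// so every log entry in a test carries a predictable timestamp.
type fixedClock struct{ now time.Time }

func (c fixedClock) Now() time.Time                         { return c.now }
func (c fixedClock) NewTicker(d time.Duration) *time.Ticker { return time.NewTicker(d) }

func main() {
	now, err := time.Parse(time.RFC3339Nano, "2028-10-11T23:37:26.953313745Z")
	if err != nil {
		panic(err)
	}

	logger, err := zap.NewProductionConfig().Build(zap.WithClock(fixedClock{now: now}))
	if err != nil {
		panic(err)
	}
	defer logger.Sync() //nolint:errcheck // best-effort flush on exit

	// The "ts" field of this entry is now deterministic.
	logger.Info("No concierge configured, skipping token credential exchange")
}
```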
@@ -7,7 +7,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io"
 	"os"
 	"path/filepath"
 	"time"
@@ -22,7 +21,7 @@ import (
 	"go.pinniped.dev/pkg/oidcclient/oidctypes"
 )
 
-//nolint: gochecknoinits
+// nolint: gochecknoinits
 func init() {
 	loginCmd.AddCommand(staticLoginCommand(staticLoginRealDeps()))
 }
@@ -75,7 +74,7 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command {
 	cmd.Flags().StringVar(&flags.conciergeAPIGroupSuffix, "concierge-api-group-suffix", groupsuffix.PinnipedDefaultSuffix, "Concierge API group suffix")
 	cmd.Flags().StringVar(&flags.credentialCachePath, "credential-cache", filepath.Join(mustGetConfigDir(), "credentials.yaml"), "Path to cluster-specific credentials cache (\"\" disables the cache)")
 
-	cmd.RunE = func(cmd *cobra.Command, args []string) error { return runStaticLogin(cmd.OutOrStdout(), deps, flags) }
+	cmd.RunE = func(cmd *cobra.Command, args []string) error { return runStaticLogin(cmd, deps, flags) }
 
 	mustMarkDeprecated(cmd, "concierge-namespace", "not needed anymore")
 	mustMarkHidden(cmd, "concierge-namespace")
@@ -83,8 +82,9 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command {
 	return cmd
 }
 
-func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error {
-	pLogger, err := SetLogLevel(deps.lookupEnv)
+func runStaticLogin(cmd *cobra.Command, deps staticLoginDeps, flags staticLoginParams) error {
+	out := cmd.OutOrStdout()
+	pLogger, err := SetLogLevel(cmd.Context(), deps.lookupEnv)
 	if err != nil {
 		plog.WarningErr("Received error while setting log level", err)
 	}
@@ -12,16 +12,15 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/klog/v2"
-
-	"go.pinniped.dev/internal/testutil/testlogger"
-
 	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
+	clocktesting "k8s.io/utils/clock/testing"
 
 	"go.pinniped.dev/internal/certauthority"
 	"go.pinniped.dev/internal/here"
+	"go.pinniped.dev/internal/plog"
 	"go.pinniped.dev/internal/testutil"
 	"go.pinniped.dev/pkg/conciergeclient"
 )
@@ -35,6 +34,10 @@ func TestLoginStaticCommand(t *testing.T) {
 	testCABundlePath := filepath.Join(tmpdir, "testca.pem")
 	require.NoError(t, ioutil.WriteFile(testCABundlePath, testCA.Bundle(), 0600))
 
+	now, err := time.Parse(time.RFC3339Nano, "2038-12-07T23:37:26.953313745Z")
+	require.NoError(t, err)
+	nowStr := now.Local().Format(time.RFC1123)
+
 	tests := []struct {
 		name string
 		args []string
@@ -136,7 +139,9 @@ func TestLoginStaticCommand(t *testing.T) {
 			wantStderr: here.Doc(`
 				Error: could not complete Concierge credential exchange: some concierge error
 			`),
-			wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped login: exchanging static token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1/\""},
+			wantLogs: []string{
+				nowStr + ` pinniped-login cmd/login_static.go:147 exchanging static token for cluster credential {"endpoint": "https://127.0.0.1/", "authenticator type": "webhook", "authenticator name": "test-authenticator"}`,
+			},
 		},
 		{
 			name: "invalid API group suffix",
@@ -165,8 +170,10 @@ func TestLoginStaticCommand(t *testing.T) {
 	for _, tt := range tests {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			testLogger := testlogger.NewLegacy(t) //nolint: staticcheck // old test with lots of log statements
-			klog.SetLogger(testLogger.Logger)
+			var buf bytes.Buffer
+			fakeClock := clocktesting.NewFakeClock(now)
+			ctx := plog.TestZapOverrides(context.Background(), t, &buf, nil, zap.WithClock(plog.ZapClock(fakeClock)))
+
 			cmd := staticLoginCommand(staticLoginDeps{
 				lookupEnv: func(s string) (string, bool) {
 					v, ok := tt.env[s]
@@ -194,7 +201,7 @@ func TestLoginStaticCommand(t *testing.T) {
 			cmd.SetOut(&stdout)
 			cmd.SetErr(&stderr)
 			cmd.SetArgs(tt.args)
-			err := cmd.Execute()
+			err := cmd.ExecuteContext(ctx)
 			if tt.wantError {
 				require.Error(t, err)
 			} else {
@@ -203,7 +210,7 @@ func TestLoginStaticCommand(t *testing.T) {
 			require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
 			require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
 
-			require.Equal(t, tt.wantLogs, testLogger.Lines())
+			require.Equal(t, tt.wantLogs, logLines(buf.String()))
 		})
 	}
 }
@@ -4,12 +4,14 @@
 package cmd
 
 import (
-	"os"
+	"context"
 
 	"github.com/spf13/cobra"
+
+	"go.pinniped.dev/internal/plog"
 )
 
-//nolint: gochecknoglobals
+// nolint: gochecknoglobals
 var rootCmd = &cobra.Command{
 	Use:   "pinniped",
 	Short: "pinniped",
@@ -19,8 +21,11 @@ var rootCmd = &cobra.Command{
 
 // Execute adds all child commands to the root command and sets flags appropriately.
 // This is called by main.main(). It only needs to happen once to the rootCmd.
-func Execute() {
-	if err := rootCmd.Execute(); err != nil {
-		os.Exit(1)
+func Execute() error {
+	defer plog.Setup()()
+	// the context does not matter here because it is unused when CLI formatting is provided
+	if err := plog.ValidateAndSetLogLevelAndFormatGlobally(context.Background(), plog.LogSpec{Format: plog.FormatCLI}); err != nil {
+		return err
 	}
+	return rootCmd.Execute()
 }
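`defer plog.Setup()()` follows the common pattern of a setup function that returns its matching teardown, presumably flushing buffered log output however the program exits; with plain zap the equivalent teardown is `Sync`. A generic sketch of that pattern — the `setupLogging` name is made up for illustration, not Pinniped's implementation:

```go
package main

import "go.uber.org/zap"

// setupLogging installs a global logger and returns the function that undoes
// it; the caller defers the returned func so buffered entries are flushed
// even when the program exits through an error path.
func setupLogging() func() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	undo := zap.ReplaceGlobals(logger) // zap.L() and zap.S() now use this logger
	return func() {
		_ = logger.Sync() // flush on shutdown, best effort
		undo()
	}
}

func main() {
	defer setupLogging()()
	zap.L().Info("pinniped CLI starting")
}
```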
@@ -13,7 +13,7 @@ import (
 	_ "go.pinniped.dev/internal/crypto/ptls"
 )
 
-//nolint: gochecknoinits
+// nolint: gochecknoinits
 func init() {
 	// browsers like chrome like to write to our std out which breaks our JSON ExecCredential output
 	// thus we redirect the browser's std out to our std err
@@ -21,5 +21,7 @@ func init() {
 }
 
 func main() {
-	cmd.Execute()
+	if err := cmd.Execute(); err != nil {
+		os.Exit(1)
+	}
 }
@@ -86,8 +86,14 @@ data:
       imagePullSecrets:
       - image-pull-secret
       (@ end @)
-      (@ if data.values.log_level: @)
-      logLevel: (@= getAndValidateLogLevel() @)
+      (@ if data.values.log_level or data.values.deprecated_log_format: @)
+      log:
+        (@ if data.values.log_level: @)
+        level: (@= getAndValidateLogLevel() @)
+        (@ end @)
+        (@ if data.values.deprecated_log_format: @)
+        format: (@= data.values.deprecated_log_format @)
+        (@ end @)
       (@ end @)
 ---
 #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
|
|||||||
#! Specify the verbosity of logging: info ("nice to know" information), debug (developer
|
#! Specify the verbosity of logging: info ("nice to know" information), debug (developer
|
||||||
#! information), trace (timing information), all (kitchen sink).
|
#! information), trace (timing information), all (kitchen sink).
|
||||||
log_level: #! By default, when this value is left unset, only warnings and errors are printed. There is no way to suppress warning and error logs.
|
log_level: #! By default, when this value is left unset, only warnings and errors are printed. There is no way to suppress warning and error logs.
|
||||||
|
#! Specify the format of logging: json (for machine parsable logs) and text (for legacy klog formatted logs).
|
||||||
|
#! By default, when this value is left unset, logs are formatted in json.
|
||||||
|
#! This configuration is deprecated and will be removed in a future release at which point logs will always be formatted as json.
|
||||||
|
deprecated_log_format:
|
||||||
|
|
||||||
run_as_user: 65532 #! run_as_user specifies the user ID that will own the process, see the Dockerfile for the reasoning behind this choice
|
run_as_user: 65532 #! run_as_user specifies the user ID that will own the process, see the Dockerfile for the reasoning behind this choice
|
||||||
run_as_group: 65532 #! run_as_group specifies the group ID that will own the process, see the Dockerfile for the reasoning behind this choice
|
run_as_group: 65532 #! run_as_group specifies the group ID that will own the process, see the Dockerfile for the reasoning behind this choice
|
||||||
|
@@ -54,8 +54,14 @@ _: #@ template.replace(data.values.custom_labels)
 #@   "labels": labels(),
 #@   "insecureAcceptExternalUnencryptedHttpRequests": data.values.deprecated_insecure_accept_external_unencrypted_http_requests
 #@ }
+#@ if data.values.log_level or data.values.deprecated_log_format:
+#@   config["log"] = {}
+#@ end
 #@ if data.values.log_level:
-#@   config["logLevel"] = getAndValidateLogLevel()
+#@   config["log"]["level"] = getAndValidateLogLevel()
+#@ end
+#@ if data.values.deprecated_log_format:
+#@   config["log"]["format"] = data.values.deprecated_log_format
 #@ end
 #@ if data.values.endpoints:
 #@   config["endpoints"] = data.values.endpoints
@@ -57,6 +57,10 @@ service_loadbalancer_ip: #! e.g. 1.2.3.4
 #! Specify the verbosity of logging: info ("nice to know" information), debug (developer information), trace (timing information),
 #! or all (kitchen sink). Do not use trace or all on production systems, as credentials may get logged.
 log_level: #! By default, when this value is left unset, only warnings and errors are printed. There is no way to suppress warning and error logs.
+#! Specify the format of logging: json (for machine parsable logs) and text (for legacy klog formatted logs).
+#! By default, when this value is left unset, logs are formatted in json.
+#! This configuration is deprecated and will be removed in a future release at which point logs will always be formatted as json.
+deprecated_log_format:
 
 run_as_user: 65532 #! run_as_user specifies the user ID that will own the process, see the Dockerfile for the reasoning behind this choice
 run_as_group: 65532 #! run_as_group specifies the group ID that will own the process, see the Dockerfile for the reasoning behind this choice
go.mod
@@ -45,6 +45,7 @@ require (
 	github.com/go-ldap/ldap/v3 v3.4.3
 	github.com/go-logr/logr v1.2.3
 	github.com/go-logr/stdr v1.2.2
+	github.com/go-logr/zapr v1.2.3
 	github.com/gofrs/flock v0.8.1
 	github.com/golang/mock v1.6.0
 	github.com/google/go-cmp v0.5.8
@@ -64,6 +65,7 @@ require (
 	github.com/stretchr/testify v1.7.1
 	github.com/tdewolff/minify/v2 v2.11.2
 	go.uber.org/atomic v1.9.0
+	go.uber.org/zap v1.21.0
 	golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f
 	golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4
 	golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
@@ -168,7 +170,6 @@ require (
 	go.opentelemetry.io/otel/trace v1.6.3 // indirect
 	go.opentelemetry.io/proto/otlp v0.15.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
-	go.uber.org/zap v1.21.0 // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
 	golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect
 	golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect
go.sum
@@ -299,6 +299,8 @@ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
+github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
+github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
 github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
 github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -15,12 +15,11 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	genericoptions "k8s.io/apiserver/pkg/server/options"
 	"k8s.io/client-go/pkg/version"
 	"k8s.io/client-go/rest"
-	"k8s.io/component-base/logs"
-	"k8s.io/klog/v2"
 
 	"go.pinniped.dev/internal/certauthority/dynamiccertauthority"
 	"go.pinniped.dev/internal/concierge/apiserver"
@@ -35,6 +34,7 @@ import (
 	"go.pinniped.dev/internal/here"
 	"go.pinniped.dev/internal/issuer"
 	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/plog"
 	"go.pinniped.dev/internal/registry/credentialrequest"
 )
 
@@ -100,7 +100,7 @@ func addCommandlineFlagsToCommand(cmd *cobra.Command, app *App) {
 // Boot the aggregated API server, which will in turn boot the controllers.
 func (a *App) runServer(ctx context.Context) error {
 	// Read the server config file.
-	cfg, err := concierge.FromPath(a.configPath)
+	cfg, err := concierge.FromPath(ctx, a.configPath)
 	if err != nil {
 		return fmt.Errorf("could not load config: %w", err)
 	}
@@ -250,16 +250,20 @@ func getAggregatedAPIServerConfig(
 	return apiServerConfig, nil
 }
 
-func main() error { // return an error instead of klog.Fatal to allow defer statements to run
-	logs.InitLogs()
-	defer logs.FlushLogs()
+func main() error { // return an error instead of plog.Fatal to allow defer statements to run
+	defer plog.Setup()()
 
 	// Dump out the time since compile (mostly useful for benchmarking our local development cycle latency).
 	var timeSinceCompile time.Duration
 	if buildDate, err := time.Parse(time.RFC3339, version.Get().BuildDate); err == nil {
 		timeSinceCompile = time.Since(buildDate).Round(time.Second)
 	}
-	klog.Infof("Running %s at %#v (%s since build)", rest.DefaultKubernetesUserAgent(), version.Get(), timeSinceCompile)
+
+	plog.Always("Running concierge",
+		"user-agent", rest.DefaultKubernetesUserAgent(),
+		"version", versionInfo(version.Get()),
+		"time-since-build", timeSinceCompile,
+	)
 
 	ctx := genericapiserver.SetupSignalContext()
 
@@ -268,6 +272,8 @@ func main() error { // return an error instead of klog.Fatal to allow defer stat
 
 func Main() {
 	if err := main(); err != nil {
-		klog.Fatal(err)
+		plog.Fatal(err)
 	}
 }
+
+type versionInfo apimachineryversion.Info // hide .String() method from plog
|
|||||||
package concierge
|
package concierge
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"strings"
|
"strings"
|
||||||
@ -41,7 +42,7 @@ const (
|
|||||||
// Note! The Config file should contain base64-encoded WebhookCABundle data.
|
// Note! The Config file should contain base64-encoded WebhookCABundle data.
|
||||||
// This function will decode that base64-encoded data to PEM bytes to be stored
|
// This function will decode that base64-encoded data to PEM bytes to be stored
|
||||||
// in the Config.
|
// in the Config.
|
||||||
func FromPath(path string) (*Config, error) {
|
func FromPath(ctx context.Context, path string) (*Config, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := ioutil.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("read file: %w", err)
|
return nil, fmt.Errorf("read file: %w", err)
|
||||||
@ -78,7 +79,8 @@ func FromPath(path string) (*Config, error) {
|
|||||||
return nil, fmt.Errorf("validate names: %w", err)
|
return nil, fmt.Errorf("validate names: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := plog.ValidateAndSetLogLevelGlobally(config.LogLevel); err != nil {
|
plog.MaybeSetDeprecatedLogLevel(config.LogLevel, &config.Log)
|
||||||
|
if err := plog.ValidateAndSetLogLevelAndFormatGlobally(ctx, config.Log); err != nil {
|
||||||
return nil, fmt.Errorf("validate log level: %w", err)
|
return nil, fmt.Errorf("validate log level: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
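FromPath now threads a context through and validates a `log:` block that carries both `level` and `format`, while the old top-level `logLevel` survives as a deprecated fallback via `plog.MaybeSetDeprecatedLogLevel`. A self-contained sketch of decoding that shape with sigs.k8s.io/yaml, using local stand-in types rather than Pinniped's real `Config`/`plog.LogSpec`:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// logSpec and config mirror the shape of the new configuration, but they are
// illustrative stand-ins, not the real plog.LogSpec / concierge Config types.
type logSpec struct {
	Level  string `json:"level"`
	Format string `json:"format"`
}

type config struct {
	LogLevel *string `json:"logLevel"` // deprecated: use log.level instead
	Log      logSpec `json:"log"`
}

func main() {
	data := []byte("log:\n  level: debug\n  format: json\n")

	var c config
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(fmt.Errorf("decode yaml: %w", err))
	}

	// Mirrors the fallback: the deprecated logLevel only applies when the new
	// log.level is left unset.
	if c.LogLevel != nil && c.Log.Level == "" {
		c.Log.Level = *c.LogLevel
	}
	fmt.Printf("level=%q format=%q\n", c.Log.Level, c.Log.Format)
}
```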
@ -4,6 +4,7 @@
|
|||||||
package concierge
|
package concierge
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
@ -90,9 +91,186 @@ func TestFromPath(t *testing.T) {
|
|||||||
Image: pointer.StringPtr("kube-cert-agent-image"),
|
Image: pointer.StringPtr("kube-cert-agent-image"),
|
||||||
ImagePullSecrets: []string{"kube-cert-agent-image-pull-secret"},
|
ImagePullSecrets: []string{"kube-cert-agent-image-pull-secret"},
|
||||||
},
|
},
|
||||||
LogLevel: plog.LevelDebug,
|
LogLevel: func(level plog.LogLevel) *plog.LogLevel { return &level }(plog.LevelDebug),
|
||||||
|
Log: plog.LogSpec{
|
||||||
|
Level: plog.LevelDebug,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "Fully filled out new log struct",
|
||||||
|
yaml: here.Doc(`
|
||||||
|
---
|
||||||
|
discovery:
|
||||||
|
url: https://some.discovery/url
|
||||||
|
api:
|
||||||
|
servingCertificate:
|
||||||
|
durationSeconds: 3600
|
||||||
|
renewBeforeSeconds: 2400
|
||||||
|
apiGroupSuffix: some.suffix.com
|
||||||
|
aggregatedAPIServerPort: 12345
|
||||||
|
impersonationProxyServerPort: 4242
|
||||||
|
names:
|
||||||
|
servingCertificateSecret: pinniped-concierge-api-tls-serving-certificate
|
||||||
|
credentialIssuer: pinniped-config
|
||||||
|
apiService: pinniped-api
|
||||||
|
kubeCertAgentPrefix: kube-cert-agent-prefix
|
||||||
|
impersonationLoadBalancerService: impersonationLoadBalancerService-value
|
||||||
|
impersonationClusterIPService: impersonationClusterIPService-value
|
||||||
|
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||||
|
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||||
|
impersonationSignerSecret: impersonationSignerSecret-value
|
||||||
|
impersonationSignerSecret: impersonationSignerSecret-value
|
||||||
|
agentServiceAccount: agentServiceAccount-value
|
||||||
|
extraName: extraName-value
|
||||||
|
labels:
|
||||||
|
myLabelKey1: myLabelValue1
|
||||||
|
myLabelKey2: myLabelValue2
|
||||||
|
kubeCertAgent:
|
||||||
|
namePrefix: kube-cert-agent-name-prefix-
|
||||||
|
image: kube-cert-agent-image
|
||||||
|
imagePullSecrets: [kube-cert-agent-image-pull-secret]
|
||||||
|
log:
|
||||||
|
level: all
|
||||||
|
format: json
|
||||||
|
`),
|
||||||
|
wantConfig: &Config{
|
||||||
|
DiscoveryInfo: DiscoveryInfoSpec{
|
||||||
|
URL: pointer.StringPtr("https://some.discovery/url"),
|
||||||
|
},
|
||||||
|
APIConfig: APIConfigSpec{
|
||||||
|
ServingCertificateConfig: ServingCertificateConfigSpec{
|
||||||
|
DurationSeconds: pointer.Int64Ptr(3600),
|
||||||
|
RenewBeforeSeconds: pointer.Int64Ptr(2400),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
APIGroupSuffix: pointer.StringPtr("some.suffix.com"),
|
||||||
|
AggregatedAPIServerPort: pointer.Int64Ptr(12345),
|
||||||
|
ImpersonationProxyServerPort: pointer.Int64Ptr(4242),
|
||||||
|
NamesConfig: NamesConfigSpec{
|
||||||
|
ServingCertificateSecret: "pinniped-concierge-api-tls-serving-certificate",
|
||||||
|
CredentialIssuer: "pinniped-config",
|
||||||
|
APIService: "pinniped-api",
|
||||||
|
ImpersonationLoadBalancerService: "impersonationLoadBalancerService-value",
|
||||||
|
ImpersonationClusterIPService: "impersonationClusterIPService-value",
|
||||||
|
ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value",
|
||||||
|
ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value",
|
||||||
|
ImpersonationSignerSecret: "impersonationSignerSecret-value",
|
||||||
|
AgentServiceAccount: "agentServiceAccount-value",
|
||||||
|
},
|
||||||
|
Labels: map[string]string{
|
||||||
|
"myLabelKey1": "myLabelValue1",
|
||||||
|
"myLabelKey2": "myLabelValue2",
|
||||||
|
},
|
||||||
|
KubeCertAgentConfig: KubeCertAgentSpec{
|
||||||
|
NamePrefix: pointer.StringPtr("kube-cert-agent-name-prefix-"),
|
||||||
|
Image: pointer.StringPtr("kube-cert-agent-image"),
|
||||||
|
ImagePullSecrets: []string{"kube-cert-agent-image-pull-secret"},
|
||||||
|
},
|
||||||
|
Log: plog.LogSpec{
|
||||||
|
Level: plog.LevelAll,
|
||||||
|
Format: plog.FormatJSON,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Fully filled out old log and new log struct",
|
||||||
|
yaml: here.Doc(`
|
||||||
|
---
|
||||||
|
discovery:
|
||||||
|
url: https://some.discovery/url
|
||||||
|
api:
|
||||||
|
servingCertificate:
|
||||||
|
durationSeconds: 3600
|
||||||
|
renewBeforeSeconds: 2400
|
||||||
|
apiGroupSuffix: some.suffix.com
|
||||||
|
aggregatedAPIServerPort: 12345
|
||||||
|
impersonationProxyServerPort: 4242
|
||||||
|
names:
|
||||||
|
servingCertificateSecret: pinniped-concierge-api-tls-serving-certificate
|
||||||
|
credentialIssuer: pinniped-config
|
||||||
|
apiService: pinniped-api
|
||||||
+kubeCertAgentPrefix: kube-cert-agent-prefix
+impersonationLoadBalancerService: impersonationLoadBalancerService-value
+impersonationClusterIPService: impersonationClusterIPService-value
+impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
+impersonationCACertificateSecret: impersonationCACertificateSecret-value
+impersonationSignerSecret: impersonationSignerSecret-value
+impersonationSignerSecret: impersonationSignerSecret-value
+agentServiceAccount: agentServiceAccount-value
+extraName: extraName-value
+labels:
+myLabelKey1: myLabelValue1
+myLabelKey2: myLabelValue2
+kubeCertAgent:
+namePrefix: kube-cert-agent-name-prefix-
+image: kube-cert-agent-image
+imagePullSecrets: [kube-cert-agent-image-pull-secret]
+logLevel: debug
+log:
+level: all
+format: json
+`),
+wantConfig: &Config{
+DiscoveryInfo: DiscoveryInfoSpec{
+URL: pointer.StringPtr("https://some.discovery/url"),
+},
+APIConfig: APIConfigSpec{
+ServingCertificateConfig: ServingCertificateConfigSpec{
+DurationSeconds: pointer.Int64Ptr(3600),
+RenewBeforeSeconds: pointer.Int64Ptr(2400),
+},
+},
+APIGroupSuffix: pointer.StringPtr("some.suffix.com"),
+AggregatedAPIServerPort: pointer.Int64Ptr(12345),
+ImpersonationProxyServerPort: pointer.Int64Ptr(4242),
+NamesConfig: NamesConfigSpec{
+ServingCertificateSecret: "pinniped-concierge-api-tls-serving-certificate",
+CredentialIssuer: "pinniped-config",
+APIService: "pinniped-api",
+ImpersonationLoadBalancerService: "impersonationLoadBalancerService-value",
+ImpersonationClusterIPService: "impersonationClusterIPService-value",
+ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value",
+ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value",
+ImpersonationSignerSecret: "impersonationSignerSecret-value",
+AgentServiceAccount: "agentServiceAccount-value",
+},
+Labels: map[string]string{
+"myLabelKey1": "myLabelValue1",
+"myLabelKey2": "myLabelValue2",
+},
+KubeCertAgentConfig: KubeCertAgentSpec{
+NamePrefix: pointer.StringPtr("kube-cert-agent-name-prefix-"),
+Image: pointer.StringPtr("kube-cert-agent-image"),
+ImagePullSecrets: []string{"kube-cert-agent-image-pull-secret"},
+},
+LogLevel: func(level plog.LogLevel) *plog.LogLevel { return &level }(plog.LevelDebug),
+Log: plog.LogSpec{
+Level: plog.LevelDebug,
+Format: plog.FormatJSON,
+},
+},
+},
+{
+name: "invalid log format",
+yaml: here.Doc(`
+---
+names:
+servingCertificateSecret: pinniped-concierge-api-tls-serving-certificate
+credentialIssuer: pinniped-config
+apiService: pinniped-api
+impersonationLoadBalancerService: impersonationLoadBalancerService-value
+impersonationClusterIPService: impersonationClusterIPService-value
+impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
+impersonationCACertificateSecret: impersonationCACertificateSecret-value
+impersonationSignerSecret: impersonationSignerSecret-value
+agentServiceAccount: agentServiceAccount-value
+log:
+level: all
+format: snorlax
+`),
+wantError: "decode yaml: error unmarshaling JSON: while decoding JSON: invalid log format, valid choices are the empty string, json and text",
+},
{
name: "When only the required fields are present, causes other fields to be defaulted",
yaml: here.Doc(`
@@ -404,6 +582,8 @@ func TestFromPath(t *testing.T) {
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
+// this is a serial test because it sets the global logger

// Write yaml to temp file
f, err := ioutil.TempFile("", "pinniped-test-config-yaml-*")
require.NoError(t, err)
@@ -417,7 +597,9 @@ func TestFromPath(t *testing.T) {
require.NoError(t, err)

// Test FromPath()
-config, err := FromPath(f.Name())
+ctx, cancel := context.WithCancel(context.Background())
+t.Cleanup(cancel)
+config, err := FromPath(ctx, f.Name())

if test.wantError != "" {
require.EqualError(t, err, test.wantError)
@@ -15,7 +15,9 @@ type Config struct {
NamesConfig NamesConfigSpec `json:"names"`
KubeCertAgentConfig KubeCertAgentSpec `json:"kubeCertAgent"`
Labels map[string]string `json:"labels"`
-LogLevel plog.LogLevel `json:"logLevel"`
+// Deprecated: use log.level instead
+LogLevel *plog.LogLevel `json:"logLevel"`
+Log plog.LogSpec `json:"log"`
}

// DiscoveryInfoSpec contains configuration knobs specific to
@@ -6,6 +6,7 @@
package supervisor

import (
+"context"
"fmt"
"io/ioutil"
"net"
@@ -28,7 +29,7 @@ const (
// FromPath loads an Config from a provided local file path, inserts any
// defaults (from the Config documentation), and verifies that the config is
// valid (Config documentation).
-func FromPath(path string) (*Config, error) {
+func FromPath(ctx context.Context, path string) (*Config, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("read file: %w", err)
@@ -53,7 +54,8 @@ func FromPath(path string) (*Config, error) {
return nil, fmt.Errorf("validate names: %w", err)
}

-if err := plog.ValidateAndSetLogLevelGlobally(config.LogLevel); err != nil {
+plog.MaybeSetDeprecatedLogLevel(config.LogLevel, &config.Log)
+if err := plog.ValidateAndSetLogLevelAndFormatGlobally(ctx, config.Log); err != nil {
return nil, fmt.Errorf("validate log level: %w", err)
}

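(Aside: a minimal, hedged sketch of how a caller might use the context-aware FromPath shown above. The import path and the config file location are assumptions for illustration only, not part of this commit; the plog calls mirror usages that appear elsewhere in the diff.)

package main

import (
	"context"

	"go.pinniped.dev/internal/config/supervisor" // import path assumed for this sketch
	"go.pinniped.dev/internal/plog"
)

func main() {
	// FromPath now takes a context so that validating and setting the global
	// log level/format can be tied to the caller's lifetime.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg, err := supervisor.FromPath(ctx, "/etc/podinfo/pinniped.yaml") // hypothetical path
	if err != nil {
		plog.Fatal(err)
	}
	plog.Info("loaded supervisor config", "logLevel", cfg.Log.Level, "logFormat", cfg.Log.Format)
}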
@@ -4,6 +4,7 @@
package supervisor

import (
+"context"
"fmt"
"io/ioutil"
"os"
@@ -13,6 +14,7 @@ import (
"k8s.io/utils/pointer"

"go.pinniped.dev/internal/here"
+"go.pinniped.dev/internal/plog"
)

func TestFromPath(t *testing.T) {
@@ -40,6 +42,7 @@ func TestFromPath(t *testing.T) {
network: tcp
address: 127.0.0.1:1234
insecureAcceptExternalUnencryptedHttpRequests: false
+logLevel: trace
`),
wantConfig: &Config{
APIGroupSuffix: pointer.StringPtr("some.suffix.com"),
@@ -61,8 +64,122 @@ func TestFromPath(t *testing.T) {
},
},
AllowExternalHTTP: false,
+LogLevel: func(level plog.LogLevel) *plog.LogLevel { return &level }(plog.LevelTrace),
+Log: plog.LogSpec{
+Level: plog.LevelTrace,
+},
},
},
+{
+name: "Happy with new log field",
+yaml: here.Doc(`
+---
+apiGroupSuffix: some.suffix.com
+labels:
+myLabelKey1: myLabelValue1
+myLabelKey2: myLabelValue2
+names:
+defaultTLSCertificateSecret: my-secret-name
+endpoints:
+https:
+network: unix
+address: :1234
+http:
+network: tcp
+address: 127.0.0.1:1234
+insecureAcceptExternalUnencryptedHttpRequests: false
+log:
+level: info
+format: text
+`),
+wantConfig: &Config{
+APIGroupSuffix: pointer.StringPtr("some.suffix.com"),
+Labels: map[string]string{
+"myLabelKey1": "myLabelValue1",
+"myLabelKey2": "myLabelValue2",
+},
+NamesConfig: NamesConfigSpec{
+DefaultTLSCertificateSecret: "my-secret-name",
+},
+Endpoints: &Endpoints{
+HTTPS: &Endpoint{
+Network: "unix",
+Address: ":1234",
+},
+HTTP: &Endpoint{
+Network: "tcp",
+Address: "127.0.0.1:1234",
+},
+},
+AllowExternalHTTP: false,
+Log: plog.LogSpec{
+Level: plog.LevelInfo,
+Format: plog.FormatText,
+},
+},
+},
+{
+name: "Happy with old and new log field",
+yaml: here.Doc(`
+---
+apiGroupSuffix: some.suffix.com
+labels:
+myLabelKey1: myLabelValue1
+myLabelKey2: myLabelValue2
+names:
+defaultTLSCertificateSecret: my-secret-name
+endpoints:
+https:
+network: unix
+address: :1234
+http:
+network: tcp
+address: 127.0.0.1:1234
+insecureAcceptExternalUnencryptedHttpRequests: false
+logLevel: trace
+log:
+level: info
+format: text
+`),
+wantConfig: &Config{
+APIGroupSuffix: pointer.StringPtr("some.suffix.com"),
+Labels: map[string]string{
+"myLabelKey1": "myLabelValue1",
+"myLabelKey2": "myLabelValue2",
+},
+NamesConfig: NamesConfigSpec{
+DefaultTLSCertificateSecret: "my-secret-name",
+},
+Endpoints: &Endpoints{
+HTTPS: &Endpoint{
+Network: "unix",
+Address: ":1234",
+},
+HTTP: &Endpoint{
+Network: "tcp",
+Address: "127.0.0.1:1234",
+},
+},
+AllowExternalHTTP: false,
+LogLevel: func(level plog.LogLevel) *plog.LogLevel { return &level }(plog.LevelTrace),
+Log: plog.LogSpec{
+Level: plog.LevelTrace,
+Format: plog.FormatText,
+},
+},
+},
+{
+name: "bad log format",
+yaml: here.Doc(`
+---
+names:
+defaultTLSCertificateSecret: my-secret-name
+log:
+level: info
+format: cli
+`),
+wantError: "decode yaml: error unmarshaling JSON: while decoding JSON: invalid log format, valid choices are the empty string, json and text",
+},
{
name: "When only the required fields are present, causes other fields to be defaulted",
yaml: here.Doc(`
@@ -307,7 +424,7 @@ func TestFromPath(t *testing.T) {
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
-t.Parallel()
+// this is a serial test because it sets the global logger

// Write yaml to temp file
f, err := ioutil.TempFile("", "pinniped-test-config-yaml-*")
@@ -322,7 +439,9 @@ func TestFromPath(t *testing.T) {
require.NoError(t, err)

// Test FromPath()
-config, err := FromPath(f.Name())
+ctx, cancel := context.WithCancel(context.Background())
+t.Cleanup(cancel)
+config, err := FromPath(ctx, f.Name())

if test.wantError != "" {
require.EqualError(t, err, test.wantError)
@@ -11,10 +11,12 @@ import (

// Config contains knobs to setup an instance of the Pinniped Supervisor.
type Config struct {
APIGroupSuffix *string `json:"apiGroupSuffix,omitempty"`
Labels map[string]string `json:"labels"`
NamesConfig NamesConfigSpec `json:"names"`
-LogLevel plog.LogLevel `json:"logLevel"`
+// Deprecated: use log.level instead
+LogLevel *plog.LogLevel `json:"logLevel"`
+Log plog.LogSpec `json:"log"`
Endpoints *Endpoints `json:"endpoints"`
AllowExternalHTTP stringOrBoolAsBool `json:"insecureAcceptExternalUnencryptedHttpRequests"`
}
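(Aside: a short, hedged sketch of what the struct change above means for callers. The deprecated logLevel field becomes a pointer so that "not set" can be distinguished from an explicit value, while new configs populate the nested log section. The import path is assumed for illustration; the field and constant names come from the diff itself.)

package main

import (
	"go.pinniped.dev/internal/config/supervisor" // import path assumed for this sketch
	"go.pinniped.dev/internal/plog"
)

func main() {
	level := plog.LevelTrace

	// Old style: only the deprecated top-level field is set.
	oldStyle := supervisor.Config{LogLevel: &level}

	// New style: the nested log section carries both level and format.
	newStyle := supervisor.Config{Log: plog.LogSpec{Level: plog.LevelInfo, Format: plog.FormatJSON}}

	_, _ = oldStyle, newStyle
}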
@@ -32,6 +32,8 @@ type certsExpirerController struct {
renewBefore time.Duration

secretKey string

+logger plog.Logger
}

// NewCertsExpirerController returns a controllerlib.Controller that will delete a
@@ -45,10 +47,12 @@ func NewCertsExpirerController(
withInformer pinnipedcontroller.WithInformerOptionFunc,
renewBefore time.Duration,
secretKey string,
+logger plog.Logger,
) controllerlib.Controller {
+const name = "certs-expirer-controller"
return controllerlib.New(
controllerlib.Config{
-Name: "certs-expirer-controller",
+Name: name,
Syncer: &certsExpirerController{
namespace: namespace,
certsSecretResourceName: certsSecretResourceName,
@@ -56,6 +60,7 @@ func NewCertsExpirerController(
secretInformer: secretInformer,
renewBefore: renewBefore,
secretKey: secretKey,
+logger: logger.WithName(name),
},
},
withInformer(
@@ -74,7 +79,7 @@ func (c *certsExpirerController) Sync(ctx controllerlib.Context) error {
return fmt.Errorf("failed to get %s/%s secret: %w", c.namespace, c.certsSecretResourceName, err)
}
if notFound {
-plog.Info("secret does not exist yet or was deleted",
+c.logger.Info("secret does not exist yet or was deleted",
"controller", ctx.Name,
"namespace", c.namespace,
"name", c.certsSecretResourceName,
@@ -91,7 +96,7 @@ func (c *certsExpirerController) Sync(ctx controllerlib.Context) error {

certAge := time.Since(notBefore)
renewDelta := certAge - c.renewBefore
-plog.Debug("found renew delta",
+c.logger.Debug("found renew delta",
"controller", ctx.Name,
"namespace", c.namespace,
"name", c.certsSecretResourceName,
@@ -4,12 +4,15 @@
package apicerts

import (
+"bytes"
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"errors"
+"io"
+"strings"
"testing"
"time"

@@ -24,6 +27,7 @@ import (
kubetesting "k8s.io/client-go/testing"

"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
"go.pinniped.dev/internal/testutil"
)

@@ -101,6 +105,7 @@ func TestExpirerControllerFilters(t *testing.T) {
withInformer.WithInformer,
0, // renewBefore, not needed
"", // not needed
+plog.TestLogger(t, io.Discard),
)

unrelated := corev1.Secret{}
@@ -125,10 +130,12 @@ func TestExpirerControllerSync(t *testing.T) {
fillSecretData func(*testing.T, map[string][]byte)
configKubeAPIClient func(*kubernetesfake.Clientset)
wantDelete bool
+wantLog string
wantError string
}{
{
name: "secret does not exist",
+wantLog: `{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"certs-expirer-controller","caller":"apicerts/certs_expirer.go:<line>$apicerts.(*certsExpirerController).Sync","message":"secret does not exist yet or was deleted","controller":"","namespace":"some-namespace","name":"some-resource-name","key":"some-awesome-key","renewBefore":"0s"}`,
wantDelete: false,
},
{
@@ -251,6 +258,8 @@ func TestExpirerControllerSync(t *testing.T) {
0,
)

+var log bytes.Buffer

c := NewCertsExpirerController(
namespace,
certsSecretResourceName,
@@ -259,6 +268,7 @@ func TestExpirerControllerSync(t *testing.T) {
controllerlib.WithInformer,
test.renewBefore,
fakeTestKey,
+plog.TestLogger(t, &log),
)

// Must start informers before calling TestRunSynchronously().
@@ -268,6 +278,9 @@ func TestExpirerControllerSync(t *testing.T) {
err := controllerlib.TestSync(t, c, controllerlib.Context{
Context: ctx,
})
+if len(test.wantLog) > 0 {
+require.Equal(t, test.wantLog, strings.TrimSpace(log.String()))
+}
if test.wantError != "" {
require.EqualError(t, err, test.wantError)
return
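(Aside: the buffer-capture pattern the test above relies on, condensed into a hedged, self-contained sketch. plog.TestLogger and the Logger methods are used exactly as they appear in this diff; the package placement, message, and key below are illustrative only, and the exact JSON shape is not asserted because the fixed timestamp seen above depends on the test helper's internals.)

package apicerts // hypothetical placement for this sketch

import (
	"bytes"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"

	"go.pinniped.dev/internal/plog"
)

func TestJSONLogCapture(t *testing.T) {
	var log bytes.Buffer

	// plog.TestLogger writes single-line JSON entries into the buffer.
	logger := plog.TestLogger(t, &log).WithName("certs-expirer-controller")
	logger.Info("secret does not exist yet or was deleted", "namespace", "some-namespace")

	// Each call emits one line; trim the trailing newline before asserting on it.
	require.NotEmpty(t, strings.TrimSpace(log.String()))
}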
@@ -12,11 +12,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
-"k8s.io/klog/v2"

"go.pinniped.dev/internal/certauthority"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
)

const (
@@ -139,6 +139,6 @@ func (c *certsManagerController) Sync(ctx controllerlib.Context) error {
return fmt.Errorf("could not create secret: %w", err)
}

-klog.Info("certsManagerController Sync successfully created secret")
+plog.Info("certsManagerController Sync successfully created secret")
return nil
}
@@ -8,11 +8,11 @@ import (

k8serrors "k8s.io/apimachinery/pkg/api/errors"
corev1informers "k8s.io/client-go/informers/core/v1"
-"k8s.io/klog/v2"

pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
+"go.pinniped.dev/internal/plog"
)

type certsObserverController struct {
@@ -55,7 +55,7 @@ func (c *certsObserverController) Sync(_ controllerlib.Context) error {
return fmt.Errorf("failed to get %s/%s secret: %w", c.namespace, c.certsSecretResourceName, err)
}
if notFound {
-klog.Info("certsObserverController Sync found that the secret does not exist yet or was deleted")
+plog.Info("certsObserverController Sync found that the secret does not exist yet or was deleted")
// The secret does not exist yet or was deleted.
c.dynamicCertProvider.UnsetCertKeyContent()
return nil
@@ -66,6 +66,6 @@ func (c *certsObserverController) Sync(_ controllerlib.Context) error {
return fmt.Errorf("failed to set serving cert/key content from secret %s/%s: %w", c.namespace, c.certsSecretResourceName, err)
}

-klog.Info("certsObserverController Sync updated certs in the dynamic cert provider")
+plog.Info("certsObserverController Sync updated certs in the dynamic cert provider")
return nil
}
@@ -6,15 +6,15 @@ package conditionsutil
import (
"sort"

-"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/equality"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1"
+"go.pinniped.dev/internal/plog"
)

// Merge merges conditions into conditionsToUpdate. If returns true if it merged any error conditions.
-func Merge(conditions []*v1alpha1.Condition, observedGeneration int64, conditionsToUpdate *[]v1alpha1.Condition, log logr.Logger) bool {
+func Merge(conditions []*v1alpha1.Condition, observedGeneration int64, conditionsToUpdate *[]v1alpha1.Condition, log plog.MinLogger) bool {
hadErrorCondition := false
for i := range conditions {
cond := conditions[i].DeepCopy()
@@ -45,6 +45,7 @@ import (
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/endpointaddr"
+"go.pinniped.dev/internal/plog"
)

const (
@@ -132,8 +133,8 @@ func NewImpersonatorConfigController(
impersonationSigningCertProvider: impersonationSigningCertProvider,
impersonatorFunc: impersonatorFunc,
tlsServingCertDynamicCertProvider: dynamiccert.NewServingCert("impersonation-proxy-serving-cert"),
-infoLog: log.V(2),
-debugLog: log.V(4),
+infoLog: log.V(plog.KlogLevelInfo),
+debugLog: log.V(plog.KlogLevelDebug),
},
},
withInformer(credentialIssuerInformer,
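(Aside: the verbosity change above swaps magic numbers for plog's named klog levels. A hedged sketch of the same idea in isolation, using plog.Logr and the constants exactly as they appear in this commit; everything else here is illustrative.)

package main

import (
	"go.pinniped.dev/internal/plog"
)

func main() {
	log := plog.Logr() // nolint: staticcheck // deprecated bridge, as used elsewhere in this commit

	// Named constants replace V(2), V(4), and the local klogLevelTrace = 6.
	log.V(plog.KlogLevelInfo).Info("info-level message")
	log.V(plog.KlogLevelDebug).Info("debug-level message")
	log.V(plog.KlogLevelTrace).Info("trace-level message")
}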
@@ -43,8 +43,8 @@ import (
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/kubeclient"
+"go.pinniped.dev/internal/plog"
"go.pinniped.dev/internal/testutil"
-"go.pinniped.dev/internal/testutil/testlogger"
)

func TestImpersonatorConfigControllerOptions(t *testing.T) {
@@ -63,7 +63,6 @@ func TestImpersonatorConfigControllerOptions(t *testing.T) {
var credIssuerInformerFilter controllerlib.Filter
var servicesInformerFilter controllerlib.Filter
var secretsInformerFilter controllerlib.Filter
-var testLog *testlogger.Logger

it.Before(func() {
r = require.New(t)
@@ -73,7 +72,6 @@ func TestImpersonatorConfigControllerOptions(t *testing.T) {
credIssuerInformer := pinnipedInformerFactory.Config().V1alpha1().CredentialIssuers()
servicesInformer := sharedInformerFactory.Core().V1().Services()
secretsInformer := sharedInformerFactory.Core().V1().Secrets()
-testLog = testlogger.New(t)

_ = NewImpersonatorConfigController(
installedInNamespace,
@@ -94,7 +92,7 @@ func TestImpersonatorConfigControllerOptions(t *testing.T) {
nil,
caSignerName,
nil,
-testLog.Logger,
+plog.Logr(), // nolint: staticcheck // old test with no log assertions
)
credIssuerInformerFilter = observableWithInformerOption.GetFilterForInformer(credIssuerInformer)
servicesInformerFilter = observableWithInformerOption.GetFilterForInformer(servicesInformer)
@@ -292,7 +290,6 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
var testHTTPServerInterruptCh chan struct{}
var queue *testQueue
var validClientCert *tls.Certificate
-var testLog *testlogger.Logger

var impersonatorFunc = func(
port int,
@@ -563,7 +560,7 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
impersonatorFunc,
caSignerName,
signingCertProvider,
-testLog.Logger,
+plog.Logr(), // nolint: staticcheck // old test with no log assertions
)
controllerlib.TestWrap(t, subject, func(syncer controllerlib.Syncer) controllerlib.Syncer {
tlsServingCertDynamicCertProvider = syncer.(*impersonatorConfigController).tlsServingCertDynamicCertProvider
@@ -1120,7 +1117,6 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
signingCASecret = newSigningKeySecret(caSignerName, signingCACertPEM, signingCAKeyPEM)
validClientCert, err = ca.IssueClientCert("username", nil, time.Hour)
r.NoError(err)
-testLog = testlogger.New(t)
})

it.After(func() {
@@ -28,7 +28,6 @@ import (
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
-"k8s.io/klog/v2/klogr"
"k8s.io/utils/clock"
"k8s.io/utils/pointer"

@@ -39,6 +38,7 @@ import (
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/kubeclient"
+"go.pinniped.dev/internal/plog"
)

const (
@@ -179,7 +179,7 @@ func NewAgentController(
dynamicCertProvider,
&clock.RealClock{},
cache.NewExpiring(),
-klogr.New(),
+plog.Logr(), // nolint: staticcheck // old controller with lots of log statements
)
}

@@ -4,8 +4,10 @@
package kubecertagent

import (
+"bytes"
"context"
"fmt"
+"strings"
"testing"
"time"

@@ -35,8 +37,8 @@ import (
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/kubeclient"
+"go.pinniped.dev/internal/plog"
"go.pinniped.dev/internal/testutil"
-"go.pinniped.dev/internal/testutil/testlogger"
"go.pinniped.dev/test/testlib"
)

@@ -339,7 +341,7 @@ func TestAgentController(t *testing.T) {
"could not ensure agent deployment: some creation error",
},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"creating new deployment","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
@@ -386,7 +388,7 @@ func TestAgentController(t *testing.T) {
`could not ensure agent deployment: deployments.apps "pinniped-concierge-kube-cert-agent" already exists`,
},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"creating new deployment","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch", "create"},
@@ -435,7 +437,7 @@ func TestAgentController(t *testing.T) {
`could not ensure agent deployment: deployments.apps "pinniped-concierge-kube-cert-agent" already exists`,
},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"creating new deployment","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
},
wantAgentDeployment: healthyAgentDeploymentWithDefaultedPaths,
wantDeploymentActionVerbs: []string{"list", "watch", "create"},
@@ -461,8 +463,8 @@ func TestAgentController(t *testing.T) {
"could not find a healthy agent pod (1 candidate)",
},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"deleting deployment to update immutable Selector field","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
-`kube-cert-agent-controller "level"=0 "msg"="creating new deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"creating new deployment to update immutable Selector field","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch", "delete", "create"}, // must recreate deployment when Selector field changes
@@ -496,7 +498,7 @@ func TestAgentController(t *testing.T) {
"could not ensure agent deployment: some delete error",
},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"deleting deployment to update immutable Selector field","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
},
wantAgentDeployment: healthyAgentDeploymentWithOldStyleSelector, // couldn't be deleted, so it didn't change
// delete to try to recreate deployment when Selector field changes, but delete always fails, so keeps trying to delete
@@ -532,9 +534,9 @@ func TestAgentController(t *testing.T) {
"could not ensure agent deployment: some create error",
},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"deleting deployment to update immutable Selector field","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
-`kube-cert-agent-controller "level"=0 "msg"="creating new deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"creating new deployment to update immutable Selector field","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
-`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"creating new deployment","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
},
wantAgentDeployment: nil, // was deleted, but couldn't be recreated
// delete to try to recreate deployment when Selector field changes, but create always fails, so keeps trying to recreate
@@ -584,7 +586,7 @@ func TestAgentController(t *testing.T) {
"could not find a healthy agent pod (1 candidate)",
},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="updating existing deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"updating existing deployment","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
},
wantAgentDeployment: healthyAgentDeploymentWithExtraLabels,
wantDeploymentActionVerbs: []string{"list", "watch", "update"},
@@ -619,7 +621,7 @@ func TestAgentController(t *testing.T) {
LastUpdateTime: metav1.NewTime(now),
},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="updating existing deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"updating existing deployment","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
},
},
{
@@ -931,8 +933,8 @@ func TestAgentController(t *testing.T) {
// delete to try to recreate deployment when Selector field changes, but delete always fails, so keeps trying to delete
wantDeploymentActionVerbs: []string{"list", "watch", "delete", "delete"},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).createOrUpdateDeployment","message":"deleting deployment to update immutable Selector field","deployment":{"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"},"templatePod":{"name":"kube-controller-manager-1","namespace":"kube-system"}}`,
-`kube-cert-agent-controller "level"=0 "msg"="successfully loaded signing key from agent pod into cache"`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).loadSigningKey","message":"successfully loaded signing key from agent pod into cache"}`,
},
wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
@@ -962,7 +964,7 @@ func TestAgentController(t *testing.T) {
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="successfully loaded signing key from agent pod into cache"`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).loadSigningKey","message":"successfully loaded signing key from agent pod into cache"}`,
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
@@ -996,7 +998,7 @@ func TestAgentController(t *testing.T) {
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantDistinctLogs: []string{
-`kube-cert-agent-controller "level"=0 "msg"="successfully loaded signing key from agent pod into cache"`,
+`{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"kube-cert-agent-controller","caller":"kubecertagent/kubecertagent.go:<line>$kubecertagent.(*agentController).loadSigningKey","message":"successfully loaded signing key from agent pod into cache"}`,
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
@@ -1028,7 +1030,9 @@ func TestAgentController(t *testing.T) {
}

kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
-log := testlogger.NewLegacy(t) // nolint: staticcheck // old test with lots of log statements
+var buf bytes.Buffer
+log := plog.TestZapr(t, &buf)

ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -1066,7 +1070,7 @@ func TestAgentController(t *testing.T) {
mockDynamicCert,
fakeClock,
execCache,
-log.Logger,
+log,
)

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -1081,7 +1085,7 @@ func TestAgentController(t *testing.T) {
allAllowedErrors = append(allAllowedErrors, tt.alsoAllowUndesiredDistinctErrors...)
assert.Subsetf(t, allAllowedErrors, actualErrors, "actual errors contained additional error(s) which is not expected by the test")

-assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
+assert.Equal(t, tt.wantDistinctLogs, deduplicate(logLines(buf.String())), "unexpected logs")

// Assert on all actions that happened to deployments.
var actualDeploymentActionVerbs []string
@@ -1124,6 +1128,14 @@ func TestAgentController(t *testing.T) {
}
}

+func logLines(logs string) []string {
+if len(logs) == 0 {
+return nil
+}
+
+return strings.Split(strings.TrimSpace(logs), "\n")
+}
+
func TestMergeLabelsAndAnnotations(t *testing.T) {
t.Parallel()

@ -13,12 +13,10 @@ import (
|
|||||||
|
|
||||||
"github.com/go-ldap/ldap/v3"
|
"github.com/go-ldap/ldap/v3"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/api/equality"
|
"k8s.io/apimachinery/pkg/api/equality"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||||
"k8s.io/klog/v2/klogr"
|
|
||||||
|
|
||||||
"go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1"
|
"go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1"
|
||||||
pinnipedclientset "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned"
|
pinnipedclientset "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned"
|
||||||
@ -28,6 +26,7 @@ import (
|
|||||||
"go.pinniped.dev/internal/controller/supervisorconfig/upstreamwatchers"
|
"go.pinniped.dev/internal/controller/supervisorconfig/upstreamwatchers"
|
||||||
"go.pinniped.dev/internal/controllerlib"
|
"go.pinniped.dev/internal/controllerlib"
|
||||||
"go.pinniped.dev/internal/oidc/provider"
|
"go.pinniped.dev/internal/oidc/provider"
|
||||||
|
"go.pinniped.dev/internal/plog"
|
||||||
"go.pinniped.dev/internal/upstreamldap"
|
"go.pinniped.dev/internal/upstreamldap"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -360,7 +359,7 @@ func (c *activeDirectoryWatcherController) validateUpstream(ctx context.Context,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *activeDirectoryWatcherController) updateStatus(ctx context.Context, upstream *v1alpha1.ActiveDirectoryIdentityProvider, conditions []*v1alpha1.Condition) {
|
func (c *activeDirectoryWatcherController) updateStatus(ctx context.Context, upstream *v1alpha1.ActiveDirectoryIdentityProvider, conditions []*v1alpha1.Condition) {
|
||||||
log := klogr.New().WithValues("namespace", upstream.Namespace, "name", upstream.Name)
|
log := plog.WithValues("namespace", upstream.Namespace, "name", upstream.Name)
|
||||||
updated := upstream.DeepCopy()
|
updated := upstream.DeepCopy()
|
||||||
|
|
||||||
hadErrorCondition := conditionsutil.Merge(conditions, upstream.Generation, &updated.Status.Conditions, log)
|
hadErrorCondition := conditionsutil.Merge(conditions, upstream.Generation, &updated.Status.Conditions, log)
|
||||||
@ -379,7 +378,7 @@ func (c *activeDirectoryWatcherController) updateStatus(ctx context.Context, ups
|
|||||||
ActiveDirectoryIdentityProviders(upstream.Namespace).
|
ActiveDirectoryIdentityProviders(upstream.Namespace).
|
||||||
UpdateStatus(ctx, updated, metav1.UpdateOptions{})
|
UpdateStatus(ctx, updated, metav1.UpdateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err, "failed to update status")
|
log.Error("failed to update status", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -12,7 +12,6 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||||
"k8s.io/klog/v2/klogr"
|
|
||||||
|
|
||||||
"go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1"
|
"go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1"
|
||||||
pinnipedclientset "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned"
|
pinnipedclientset "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned"
|
||||||
@ -22,6 +21,7 @@ import (
|
|||||||
"go.pinniped.dev/internal/controller/supervisorconfig/upstreamwatchers"
|
"go.pinniped.dev/internal/controller/supervisorconfig/upstreamwatchers"
|
||||||
"go.pinniped.dev/internal/controllerlib"
|
"go.pinniped.dev/internal/controllerlib"
|
||||||
"go.pinniped.dev/internal/oidc/provider"
|
"go.pinniped.dev/internal/oidc/provider"
|
||||||
|
"go.pinniped.dev/internal/plog"
|
||||||
"go.pinniped.dev/internal/upstreamldap"
|
"go.pinniped.dev/internal/upstreamldap"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -252,7 +252,7 @@ func (c *ldapWatcherController) validateUpstream(ctx context.Context, upstream *
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *ldapWatcherController) updateStatus(ctx context.Context, upstream *v1alpha1.LDAPIdentityProvider, conditions []*v1alpha1.Condition) {
|
func (c *ldapWatcherController) updateStatus(ctx context.Context, upstream *v1alpha1.LDAPIdentityProvider, conditions []*v1alpha1.Condition) {
|
||||||
log := klogr.New().WithValues("namespace", upstream.Namespace, "name", upstream.Name)
|
log := plog.WithValues("namespace", upstream.Namespace, "name", upstream.Name)
|
||||||
updated := upstream.DeepCopy()
|
updated := upstream.DeepCopy()
|
||||||
|
|
||||||
hadErrorCondition := conditionsutil.Merge(conditions, upstream.Generation, &updated.Status.Conditions, log)
|
hadErrorCondition := conditionsutil.Merge(conditions, upstream.Generation, &updated.Status.Conditions, log)
|
||||||
@ -271,6 +271,6 @@ func (c *ldapWatcherController) updateStatus(ctx context.Context, upstream *v1al
|
|||||||
LDAPIdentityProviders(upstream.Namespace).
|
LDAPIdentityProviders(upstream.Namespace).
|
||||||
UpdateStatus(ctx, updated, metav1.UpdateOptions{})
|
UpdateStatus(ctx, updated, metav1.UpdateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err, "failed to update status")
|
log.Error("failed to update status", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -14,8 +14,6 @@ import (
	"strings"
	"time"

-	"k8s.io/apimachinery/pkg/util/sets"
-
	"github.com/coreos/go-oidc/v3/oidc"
	"github.com/go-logr/logr"
	"golang.org/x/oauth2"
@@ -24,6 +22,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/cache"
+	"k8s.io/apimachinery/pkg/util/sets"
	corev1informers "k8s.io/client-go/informers/core/v1"

	"go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1"
@@ -36,6 +35,7 @@ import (
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/net/phttp"
	"go.pinniped.dev/internal/oidc/provider"
+	"go.pinniped.dev/internal/plog"
	"go.pinniped.dev/internal/upstreamoidc"
)

@@ -67,7 +67,7 @@ const (
)

var (
-	disallowedAdditionalAuthorizeParameters = map[string]bool{ //nolint: gochecknoglobals
+	disallowedAdditionalAuthorizeParameters = map[string]bool{ // nolint: gochecknoglobals
		// Reject these AdditionalAuthorizeParameters to avoid allowing the user's config to overwrite the parameters
		// that are always used by Pinniped in authcode authorization requests. The OIDC library used would otherwise
		// happily treat the user's config as an override. Users can already set the "client_id" and "scope" params
@@ -331,8 +331,7 @@ func (c *oidcWatcherController) validateIssuer(ctx context.Context, upstream *v1

	discoveredProvider, err = oidc.NewProvider(oidc.ClientContext(ctx, httpClient), upstream.Spec.Issuer)
	if err != nil {
-		const klogLevelTrace = 6
-		c.log.V(klogLevelTrace).WithValues(
+		c.log.V(plog.KlogLevelTrace).WithValues(
			"namespace", upstream.Namespace,
			"name", upstream.Name,
			"issuer", upstream.Spec.Issuer,
@@ -29,6 +29,7 @@ import (
	"go.pinniped.dev/internal/certauthority"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/oidc/provider"
+	"go.pinniped.dev/internal/plog"
	"go.pinniped.dev/internal/testutil"
	"go.pinniped.dev/internal/testutil/oidctestutil"
	"go.pinniped.dev/internal/testutil/testlogger"
@@ -78,7 +79,6 @@ func TestOIDCUpstreamWatcherControllerFilterSecret(t *testing.T) {
	pinnipedInformers := pinnipedinformers.NewSharedInformerFactory(fakePinnipedClient, 0)
	fakeKubeClient := fake.NewSimpleClientset()
	kubeInformers := informers.NewSharedInformerFactory(fakeKubeClient, 0)
-	testLog := testlogger.New(t)
	cache := provider.NewDynamicUpstreamIDPProvider()
	cache.SetOIDCIdentityProviders([]provider.UpstreamOIDCIdentityProviderI{
		&upstreamoidc.ProviderConfig{Name: "initial-entry"},
@@ -91,7 +91,7 @@ func TestOIDCUpstreamWatcherControllerFilterSecret(t *testing.T) {
		nil,
		pinnipedInformers.IDP().V1alpha1().OIDCIdentityProviders(),
		secretInformer,
-		testLog.Logger,
+		plog.Logr(), // nolint: staticcheck // old test with no log assertions
		withInformer.WithInformer,
	)

@@ -1400,7 +1400,7 @@ oidc: issuer did not match the issuer returned by provider, expected "` + testIs
	pinnipedInformers := pinnipedinformers.NewSharedInformerFactory(fakePinnipedClient, 0)
	fakeKubeClient := fake.NewSimpleClientset(tt.inputSecrets...)
	kubeInformers := informers.NewSharedInformerFactory(fakeKubeClient, 0)
-	testLog := testlogger.NewLegacy(t) //nolint: staticcheck // old test with lots of log statements
+	testLog := testlogger.NewLegacy(t) // nolint: staticcheck // old test with lots of log statements
	cache := provider.NewDynamicUpstreamIDPProvider()
	cache.SetOIDCIdentityProviders([]provider.UpstreamOIDCIdentityProviderI{
		&upstreamoidc.ProviderConfig{Name: "initial-entry"},
@@ -15,7 +15,6 @@ import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/events"
	"k8s.io/client-go/util/workqueue"
-	"k8s.io/klog/v2"

	"go.pinniped.dev/internal/plog"
)
@@ -215,7 +214,7 @@ func (c *controller) handleKey(key Key, err error) {

	if errors.Is(err, ErrSyntheticRequeue) {
		// logging this helps detecting wedged controllers with missing pre-requirements
-		klog.V(4).InfoS("requested synthetic requeue", "controller", c.Name(), "key", key)
+		plog.Debug("requested synthetic requeue", "controller", c.Name(), "key", key)
	} else {
		utilruntime.HandleError(fmt.Errorf("%s: %v failed with: %w", c.Name(), key, err))
	}
@@ -13,7 +13,8 @@ import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/events"
	"k8s.io/client-go/util/workqueue"
-	"k8s.io/klog/v2"
+
+	"go.pinniped.dev/internal/plog"
)

type Option func(*controller)
@@ -62,7 +63,7 @@ func WithInformer(getter InformerGetter, filter Filter, opt InformerOption) Opti
		AddFunc: func(obj interface{}) {
			object := metaOrDie(obj)
			if filter.Add(object) {
-				klog.V(4).InfoS("handling add",
+				plog.Debug("handling add",
					"controller", c.Name(),
					"namespace", object.GetNamespace(),
					"name", object.GetName(),
@@ -76,7 +77,7 @@ func WithInformer(getter InformerGetter, filter Filter, opt InformerOption) Opti
			oldObject := metaOrDie(oldObj)
			newObject := metaOrDie(newObj)
			if filter.Update(oldObject, newObject) {
-				klog.V(4).InfoS("handling update",
+				plog.Debug("handling update",
					"controller", c.Name(),
					"namespace", newObject.GetNamespace(),
					"name", newObject.GetName(),
@@ -101,7 +102,7 @@ func WithInformer(getter InformerGetter, filter Filter, opt InformerOption) Opti
				}
			}
			if filter.Delete(accessor) {
-				klog.V(4).InfoS("handling delete",
+				plog.Debug("handling delete",
					"controller", c.Name(),
					"namespace", accessor.GetNamespace(),
					"name", accessor.GetName(),
@@ -8,7 +8,8 @@ import (

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/events"
-	"k8s.io/klog/v2"
+
+	"go.pinniped.dev/internal/plog"
)

var _ events.EventRecorder = klogRecorder{}
@@ -16,7 +17,7 @@ var _ events.EventRecorder = klogRecorder{}
type klogRecorder struct{}

func (n klogRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
-	klog.V(4).InfoS("recording event",
+	plog.Debug("recording event",
		"regarding", regarding,
		"related", related,
		"eventtype", eventtype,
@@ -14,10 +14,10 @@ import (
	corev1informers "k8s.io/client-go/informers/core/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/events"
-	"k8s.io/klog/v2"

	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/controllerlib/test/integration/examplecontroller/api"
+	"go.pinniped.dev/internal/plog"
)

//nolint:funlen
@@ -59,7 +59,7 @@ func NewExampleCreatingController(
	}

	generateSecret := func(service *corev1.Service) error {
-		klog.V(4).InfoS("generating new secret for service", "namespace", service.Namespace, "name", service.Name)
+		plog.Debug("generating new secret for service", "namespace", service.Namespace, "name", service.Name)

		secret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
@@ -97,7 +97,7 @@ func NewExampleCreatingController(
			return nil // drop from queue because we cannot safely update this secret
		}

-		klog.V(4).InfoS("updating data in existing secret", "namespace", secret.Namespace, "name", secret.Name)
+		plog.Debug("updating data in existing secret", "namespace", secret.Namespace, "name", secret.Name)
		// Actually update the secret in the regeneration case (the secret already exists but we want to update to new secretData).
		_, updateErr := secretClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
		return updateErr
@@ -169,7 +169,7 @@ func NewExampleCreatingController(
				utilruntime.HandleError(fmt.Errorf("unable to get service %s/%s: %w", secret.Namespace, serviceName, err))
				return false
			}
-			klog.V(4).InfoS("recreating secret", "namespace", service.Namespace, "name", service.Name)
+			plog.Debug("recreating secret", "namespace", service.Namespace, "name", service.Name)
			return true
		},
	}, controllerlib.InformerOption{}),
@@ -11,7 +11,6 @@ import (

	k8sinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
-	"k8s.io/klog/v2/klogr"
	"k8s.io/utils/clock"

	pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
@@ -34,6 +33,7 @@ import (
	"go.pinniped.dev/internal/groupsuffix"
	"go.pinniped.dev/internal/kubeclient"
	"go.pinniped.dev/internal/leaderelection"
+	"go.pinniped.dev/internal/plog"
)

const (
@@ -197,6 +197,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
			controllerlib.WithInformer,
			c.ServingCertRenewBefore,
			apicerts.TLSCertificateChainSecretKey,
+			plog.New(),
		),
		singletonWorker,
	).
@@ -222,7 +223,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
			agentConfig,
			client,
			informers.installationNamespaceK8s.Core().V1().Pods(),
-			klogr.New(),
+			plog.Logr(), // nolint: staticcheck // old controller with lots of log statements
		),
		singletonWorker,
	).
@@ -232,7 +233,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
		webhookcachefiller.New(
			c.AuthenticatorCache,
			informers.pinniped.Authentication().V1alpha1().WebhookAuthenticators(),
-			klogr.New(),
+			plog.Logr(), // nolint: staticcheck // old controller with lots of log statements
		),
		singletonWorker,
	).
@@ -240,7 +241,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
		jwtcachefiller.New(
			c.AuthenticatorCache,
			informers.pinniped.Authentication().V1alpha1().JWTAuthenticators(),
-			klogr.New(),
+			plog.Logr(), // nolint: staticcheck // old controller with lots of log statements
		),
		singletonWorker,
	).
@@ -249,7 +250,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
			c.AuthenticatorCache,
			informers.pinniped.Authentication().V1alpha1().WebhookAuthenticators(),
			informers.pinniped.Authentication().V1alpha1().JWTAuthenticators(),
-			klogr.New(),
+			plog.Logr(), // nolint: staticcheck // old controller with lots of log statements
		),
		singletonWorker,
	).
@@ -275,7 +276,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
			impersonator.New,
			c.NamesConfig.ImpersonationSignerSecret,
			c.ImpersonationSigningCertProvider,
-			klogr.New(),
+			plog.Logr(), // nolint: staticcheck // old controller with lots of log statements
		),
		singletonWorker,
	).
@@ -303,6 +304,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
			controllerlib.WithInformer,
			365*24*time.Hour-time.Hour, // 1 year minus 1 hour hard coded value (i.e. wait until the last moment to break the signer)
			apicerts.CACertificateSecretKey,
+			plog.New(),
		),
		singletonWorker,
	)
@@ -20,7 +20,8 @@ import (
	_ "crypto/tls/fipsonly" // restricts all TLS configuration to FIPS-approved settings.

	"k8s.io/apiserver/pkg/server/options"
-	"k8s.io/klog/v2"
+
+	"go.pinniped.dev/internal/plog"
)

// Always use TLS 1.2 for FIPs
@@ -36,9 +37,7 @@ func init() {

	// this init runs before we have parsed our config to determine our log level
	// thus we must use a log statement that will always print instead of conditionally print
-	// for plog, that is only error and warning logs, neither of which seem appropriate here
-	// therefore, just use klog directly with no V level requirement
-	klog.InfoS("using boring crypto in fips only mode", "go version", runtime.Version())
+	plog.Always("using boring crypto in fips only mode", "go version", runtime.Version())
}

func Default(rootCAs *x509.CertPool) *tls.Config {
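Usage note (not part of the diff): plog.Always writes at verbosity 0, so it prints regardless of the configured log level, which is why the fips init above can use it before the config has been parsed. A minimal sketch of the same pattern, with the message and keys being illustrative only:

package main

import "go.pinniped.dev/internal/plog"

func init() {
	// Always prints unconditionally, mirroring the old klog.InfoS call with no V level requirement.
	plog.Always("using example mode", "detail", "value")
}

func main() {}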
@@ -16,7 +16,6 @@ import (
	loginv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
	"go.pinniped.dev/internal/constable"
	"go.pinniped.dev/internal/kubeclient"
-	"go.pinniped.dev/internal/plog"
)

const (
@@ -86,8 +85,8 @@ func New(apiGroupSuffix string) kubeclient.Middleware {
			// want our middleware to be opinionated so that it can be really good at a specific task
			// and give us specific feedback when it can't do that specific task
			return fmt.Errorf(
-				"cannot replace token credential request %q without authenticator API group",
-				plog.KObj(obj),
+				"cannot replace token credential request %s/%s without authenticator API group",
+				obj.GetNamespace(), obj.GetName(),
			)
		}

@@ -95,8 +94,8 @@ func New(apiGroupSuffix string) kubeclient.Middleware {
			if !ok {
				// see comment above about specificity of middleware
				return fmt.Errorf(
-					"cannot replace token credential request %q authenticator API group %q with group suffix %q",
-					plog.KObj(obj),
+					"cannot replace token credential request %s/%s authenticator API group %q with group suffix %q",
+					obj.GetNamespace(), obj.GetName(),
					*tokenCredentialRequest.Spec.Authenticator.APIGroup,
					apiGroupSuffix,
				)
@@ -176,7 +175,7 @@ func Unreplace(baseAPIGroup, apiGroupSuffix string) (string, bool) {
// makes sure that the provided apiGroupSuffix is a valid DNS-1123 subdomain with at least one dot,
// to match Kubernetes behavior.
func Validate(apiGroupSuffix string) error {
-	var errs []error //nolint: prealloc
+	var errs []error // nolint: prealloc

	if len(strings.Split(apiGroupSuffix, ".")) < 2 {
		errs = append(errs, constable.Error("must contain '.'"))
@@ -435,7 +435,7 @@ func TestMiddlware(t *testing.T) {
			responseObj: tokenCredentialRequestWithNewGroup, // a token credential response does not contain a spec
			wantMutateRequests:  3,
			wantMutateResponses: 1,
-			wantMutateRequestErrors: []string{`cannot replace token credential request "/" authenticator API group "authentication.concierge.some.suffix.com" with group suffix "some.suffix.com"`},
+			wantMutateRequestErrors: []string{`cannot replace token credential request / authenticator API group "authentication.concierge.some.suffix.com" with group suffix "some.suffix.com"`},
			wantResponseObj: tokenCredentialRequestWithNewGroup, // the middleware will reset object GVK for us
		},
		{
@@ -475,7 +475,7 @@ func TestMiddlware(t *testing.T) {
			responseObj: tokenCredentialRequestWithNewGroup, // a token credential response does not contain a spec
			wantMutateRequests:  3,
			wantMutateResponses: 1,
-			wantMutateRequestErrors: []string{`cannot replace token credential request "/" without authenticator API group`},
+			wantMutateRequestErrors: []string{`cannot replace token credential request / without authenticator API group`},
			wantResponseObj: tokenCredentialRequestWithNewGroup, // the middleware will reset object GVK for us
		},
		{
@@ -39,9 +39,9 @@ func glogBody(prefix string, body []byte) {
		if bytes.IndexFunc(body, func(r rune) bool {
			return r < 0x0a
		}) != -1 {
-			plog.Debug(prefix, "body", hex.Dump(body))
+			plog.All(prefix, "body", hex.Dump(body))
		} else {
-			plog.Debug(prefix, "body", string(body))
+			plog.All(prefix, "body", string(body))
		}
	}
}
@@ -22,6 +22,7 @@ import (
	"os"
	"os/signal"
	"strings"
+	"syscall"
	"time"

	"golang.org/x/crypto/bcrypt"
@@ -31,7 +32,6 @@ import (
	kubeinformers "k8s.io/client-go/informers"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
-	"k8s.io/klog/v2"

	"go.pinniped.dev/internal/constable"
	"go.pinniped.dev/internal/controller/apicerts"
@@ -339,10 +339,7 @@ func waitForSignal() os.Signal {
	return <-signalCh
}

-func run() error {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
+func run(ctx context.Context) error {
	client, err := kubeclient.New()
	if err != nil {
		return fmt.Errorf("cannot create k8s client: %w", err)
@@ -359,7 +356,7 @@ func run() error {
	startControllers(ctx, dynamicCertProvider, client.Kubernetes, kubeInformers)
	plog.Debug("controllers are ready")

-	//nolint: gosec // Intentionally binding to all network interfaces.
+	// nolint: gosec // Intentionally binding to all network interfaces.
	l, err := net.Listen("tcp", ":8443")
	if err != nil {
		return fmt.Errorf("cannot create listener: %w", err)
@@ -378,13 +375,35 @@ func run() error {
	return nil
}

-func Main() {
+func main() error { // return an error instead of plog.Fatal to allow defer statements to run
+	ctx := signalCtx()
+
	// Hardcode the logging level to debug, since this is a test app and it is very helpful to have
	// verbose logs to debug test failures.
-	if err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug); err != nil {
-		klog.Fatal(err)
+	if err := plog.ValidateAndSetLogLevelAndFormatGlobally(ctx, plog.LogSpec{Level: plog.LevelDebug}); err != nil {
+		plog.Fatal(err)
	}
-	if err := run(); err != nil {
-		klog.Fatal(err)
+
+	return run(ctx)
+}
+
+func Main() {
+	if err := main(); err != nil {
+		plog.Fatal(err)
	}
}
+
+func signalCtx() context.Context {
+	signalCh := make(chan os.Signal, 1)
+	signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		defer cancel()
+
+		s := <-signalCh
+		plog.Debug("saw signal", "signal", s)
+	}()
+
+	return ctx
+}
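Side note (not part of the commit): since Go 1.16 the standard library offers signal.NotifyContext, which provides roughly the same cancel-on-signal behavior as the signalCtx helper added above, minus the debug log line. A sketch for comparison only:

package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

// signalCtxAlt is an illustrative alternative to the signalCtx helper above.
func signalCtxAlt() context.Context {
	// The returned stop func could be deferred by the caller to restore default signal handling.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	_ = stop
	return ctx
}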
internal/plog/config.go (new file, 107 lines)
@@ -0,0 +1,107 @@
+// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package plog
+
+import (
+	"context"
+	"encoding/json"
+	"strconv"
+	"time"
+
+	"go.uber.org/zap/zapcore"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/component-base/logs"
+
+	"go.pinniped.dev/internal/constable"
+)
+
+type LogFormat string
+
+func (l *LogFormat) UnmarshalJSON(b []byte) error {
+	switch string(b) {
+	case `""`, `"json"`:
+		*l = FormatJSON
+	case `"text"`:
+		*l = FormatText
+	// there is no "cli" case because it is not a supported option via our config
+	default:
+		return errInvalidLogFormat
+	}
+	return nil
+}
+
+const (
+	FormatJSON LogFormat = "json"
+	FormatText LogFormat = "text"
+	FormatCLI  LogFormat = "cli" // only used by the pinniped CLI and not the server components
+
+	errInvalidLogLevel  = constable.Error("invalid log level, valid choices are the empty string, info, debug, trace and all")
+	errInvalidLogFormat = constable.Error("invalid log format, valid choices are the empty string, json and text")
+)
+
+var _ json.Unmarshaler = func() *LogFormat {
+	var f LogFormat
+	return &f
+}()
+
+type LogSpec struct {
+	Level  LogLevel  `json:"level,omitempty"`
+	Format LogFormat `json:"format,omitempty"`
+}
+
+func MaybeSetDeprecatedLogLevel(level *LogLevel, log *LogSpec) {
+	if level != nil {
+		Warning("logLevel is deprecated, set log.level instead")
+		log.Level = *level
+	}
+}
+
+func ValidateAndSetLogLevelAndFormatGlobally(ctx context.Context, spec LogSpec) error {
+	klogLevel := klogLevelForPlogLevel(spec.Level)
+	if klogLevel < 0 {
+		return errInvalidLogLevel
+	}
+
+	// set the global log levels used by our code and the kube code underneath us
+	if _, err := logs.GlogSetter(strconv.Itoa(int(klogLevel))); err != nil {
+		panic(err) // programmer error
+	}
+	globalLevel.SetLevel(zapcore.Level(-klogLevel)) // klog levels are inverted when zap handles them
+
+	var encoding string
+	switch spec.Format {
+	case "", FormatJSON:
+		encoding = "json"
+	case FormatCLI:
+		encoding = "console"
+	case FormatText:
+		encoding = "text"
+	default:
+		return errInvalidLogFormat
+	}
+
+	log, flush, err := newLogr(ctx, encoding, klogLevel)
+	if err != nil {
+		return err
+	}
+
+	setGlobalLoggers(log, flush)
+
+	// nolint: exhaustive // the switch above is exhaustive for format already
+	switch spec.Format {
+	case FormatCLI:
+		return nil // do not spawn go routines on the CLI to allow the CLI to call this more than once
+	case FormatText:
+		Warning("setting log.format to 'text' is deprecated - this option will be removed in a future release")
+	}
+
+	// do spawn go routines on the server
+	go wait.UntilWithContext(ctx, func(_ context.Context) { flush() }, time.Minute)
+	go func() {
+		<-ctx.Done()
+		flush() // best effort flush before shutdown as this is not coordinated with a wait group
+	}()
+
+	return nil
+}
internal/plog/config_test.go (new file, 359 lines)
@@ -0,0 +1,359 @@
+// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package plog
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"runtime"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+	"k8s.io/component-base/logs"
+	"k8s.io/klog/v2"
+	"k8s.io/klog/v2/textlogger"
+	clocktesting "k8s.io/utils/clock/testing"
+)
+
+func TestFormat(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	var buf bytes.Buffer
+
+	scanner := bufio.NewScanner(&buf)
+
+	now, err := time.Parse(time.RFC3339Nano, "2022-11-21T23:37:26.953313745Z")
+	require.NoError(t, err)
+	fakeClock := clocktesting.NewFakeClock(now)
+	nowStr := now.Local().Format(time.RFC1123)
+
+	ctx = TestZapOverrides(ctx, t, &buf, nil, zap.WithClock(ZapClock(fakeClock)))
+
+	err = ValidateAndSetLogLevelAndFormatGlobally(ctx, LogSpec{Level: LevelDebug})
+	require.NoError(t, err)
+
+	wd, err := os.Getwd()
+	require.NoError(t, err)
+
+	const startLogLine = 46 // make this match the current line number
+
+	Info("hello", "happy", "day", "duration", time.Hour+time.Minute)
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.JSONEq(t, fmt.Sprintf(`
+{
+	"level": "info",
+	"timestamp": "2022-11-21T23:37:26.953313Z",
+	"caller": "%s/config_test.go:%d$plog.TestFormat",
+	"message": "hello",
+	"happy": "day",
+	"duration": "1h1m0s"
+}`, wd, startLogLine+2), scanner.Text())
+
+	Logr().WithName("burrito").Error(errInvalidLogLevel, "wee", "a", "b")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.JSONEq(t, fmt.Sprintf(`
+{
+	"level": "error",
+	"timestamp": "2022-11-21T23:37:26.953313Z",
+	"caller": "%s/config_test.go:%d$plog.TestFormat",
+	"message": "wee",
+	"a": "b",
+	"error": "invalid log level, valid choices are the empty string, info, debug, trace and all",
+	"logger": "burrito"
+}`, wd, startLogLine+2+13), scanner.Text())
+
+	Logr().V(klogLevelWarning).Info("hey") // note that this fails to set the custom warning key because it is not via plog
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.JSONEq(t, fmt.Sprintf(`
+{
+	"level": "info",
+	"timestamp": "2022-11-21T23:37:26.953313Z",
+	"caller": "%s/config_test.go:%d$plog.TestFormat",
+	"message": "hey"
+}`, wd, startLogLine+2+13+14), scanner.Text())
+
+	Warning("bad stuff") // note that this sets the custom warning key because it is via plog
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.JSONEq(t, fmt.Sprintf(`
+{
+	"level": "info",
+	"timestamp": "2022-11-21T23:37:26.953313Z",
+	"caller": "%s/config_test.go:%d$plog.TestFormat",
+	"message": "bad stuff",
+	"warning": true
+}`, wd, startLogLine+2+13+14+11), scanner.Text())
+
+	func() { DebugErr("something happened", errInvalidLogFormat, "an", "item") }()
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.JSONEq(t, fmt.Sprintf(`
+{
+	"level": "debug",
+	"timestamp": "2022-11-21T23:37:26.953313Z",
+	"caller": "%s/config_test.go:%d$plog.TestFormat.func1",
+	"message": "something happened",
+	"error": "invalid log format, valid choices are the empty string, json and text",
+	"an": "item"
+}`, wd, startLogLine+2+13+14+11+12), scanner.Text())
+
+	Trace("should not be logged", "for", "sure")
+	require.Empty(t, buf.String())
+
+	Logr().V(klogLevelAll).Info("also should not be logged", "open", "close")
+	require.Empty(t, buf.String())
+
+	ctx = TestZapOverrides(ctx, t, &buf, nil, zap.WithClock(ZapClock(fakeClock)), zap.AddStacktrace(LevelInfo))
+
+	err = ValidateAndSetLogLevelAndFormatGlobally(ctx, LogSpec{Level: LevelDebug})
+	require.NoError(t, err)
+
+	WithName("stacky").WithName("does").Info("has a stack trace!")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.JSONEq(t, fmt.Sprintf(`
+{
+	"level": "info",
+	"timestamp": "2022-11-21T23:37:26.953313Z",
+	"caller": "%s/config_test.go:%d$plog.TestFormat",
+	"message": "has a stack trace!",
+	"logger": "stacky.does",
+	"stacktrace": %s
+}`, wd, startLogLine+2+13+14+11+12+24,
+		strconv.Quote(
+			fmt.Sprintf(
+				`go.pinniped.dev/internal/plog.TestFormat
+	%s/config_test.go:%d
+testing.tRunner
+	%s/src/testing/testing.go:1439`,
+				wd, startLogLine+2+13+14+11+12+24, runtime.GOROOT(),
+			),
+		),
+	), scanner.Text())
+
+	ctx = TestZapOverrides(ctx, t, &buf, nil, zap.WithClock(ZapClock(fakeClock)))
+
+	err = ValidateAndSetLogLevelAndFormatGlobally(ctx, LogSpec{Level: LevelDebug, Format: FormatCLI})
+	require.NoError(t, err)
+
+	DebugErr("something happened", errInvalidLogFormat, "an", "item")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(nowStr+` plog/config_test.go:%d something happened {"error": "invalid log format, valid choices are the empty string, json and text", "an": "item"}`,
+		startLogLine+2+13+14+11+12+24+28), scanner.Text())
+
+	Logr().WithName("burrito").Error(errInvalidLogLevel, "wee", "a", "b", "slightly less than a year", 363*24*time.Hour, "slightly more than 2 years", 2*367*24*time.Hour)
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(nowStr+` burrito plog/config_test.go:%d wee {"a": "b", "slightly less than a year": "363d", "slightly more than 2 years": "2y4d", "error": "invalid log level, valid choices are the empty string, info, debug, trace and all"}`,
+		startLogLine+2+13+14+11+12+24+28+6), scanner.Text())
+
+	origTimeNow := textlogger.TimeNow
+	t.Cleanup(func() {
+		textlogger.TimeNow = origTimeNow
+	})
+	textlogger.TimeNow = func() time.Time {
+		return now
+	}
+
+	old := New().WithName("created before mode change").WithValues("is", "old")
+
+	err = ValidateAndSetLogLevelAndFormatGlobally(ctx, LogSpec{Level: LevelDebug, Format: FormatText})
+	require.NoError(t, err)
+	pid := os.Getpid()
+
+	// check for the deprecation warning
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config.go:96] "setting log.format to 'text' is deprecated - this option will be removed in a future release" warning=true`,
+		pid), scanner.Text())
+
+	Debug("what is happening", "does klog", "work?")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config_test.go:%d] "what is happening" does klog="work?"`,
+		pid, startLogLine+2+13+14+11+12+24+28+6+26), scanner.Text())
+
+	Logr().WithName("panda").V(KlogLevelDebug).Info("are the best", "yes?", "yes.")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config_test.go:%d] "panda: are the best" yes?="yes."`,
+		pid, startLogLine+2+13+14+11+12+24+28+6+26+6), scanner.Text())
+
+	New().WithName("hi").WithName("there").WithValues("a", 1, "b", 2).Always("do it")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config_test.go:%d] "hi/there: do it" a=1 b=2`,
+		pid, startLogLine+2+13+14+11+12+24+28+6+26+6+6), scanner.Text())
+
+	l := WithValues("x", 33, "z", 22)
+	l.Debug("what to do")
+	l.Debug("and why")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config_test.go:%d] "what to do" x=33 z=22`,
+		pid, startLogLine+2+13+14+11+12+24+28+6+26+6+6+7), scanner.Text())
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config_test.go:%d] "and why" x=33 z=22`,
+		pid, startLogLine+2+13+14+11+12+24+28+6+26+6+6+7+1), scanner.Text())
+
+	old.Always("should be klog text format", "for", "sure")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config_test.go:%d] "created before mode change: should be klog text format" is="old" for="sure"`,
+		pid, startLogLine+2+13+14+11+12+24+28+6+26+6+6+7+1+10), scanner.Text())
+
+	// make sure child loggers do not share state
+	old1 := old.WithValues("i am", "old1")
+	old2 := old.WithName("old2")
+	old1.Warning("warn")
+	old2.Info("info")
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config_test.go:%d] "created before mode change: warn" is="old" i am="old1" warning=true`,
+		pid, startLogLine+2+13+14+11+12+24+28+6+26+6+6+7+1+10+9), scanner.Text())
+	require.True(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Equal(t, fmt.Sprintf(`I1121 23:37:26.953313%8d config_test.go:%d] "created before mode change/old2: info" is="old"`,
+		pid, startLogLine+2+13+14+11+12+24+28+6+26+6+6+7+1+10+9+1), scanner.Text())
+
+	Trace("should not be logged", "for", "sure")
+	require.Empty(t, buf.String())
+
+	Logr().V(klogLevelAll).Info("also should not be logged", "open", "close")
+	require.Empty(t, buf.String())
+
+	require.False(t, scanner.Scan())
+	require.NoError(t, scanner.Err())
+	require.Empty(t, scanner.Text())
+	require.Empty(t, buf.String())
+}
+
+func TestValidateAndSetLogLevelGlobally(t *testing.T) {
+	originalLogLevel := getKlogLevel()
+	require.GreaterOrEqual(t, int(originalLogLevel), int(klog.Level(0)), "cannot get klog level")
+
+	tests := []struct {
+		name        string
+		level       LogLevel
+		wantLevel   klog.Level
+		wantEnabled []LogLevel
+		wantErr     string
+	}{
+		{
+			name:        "unset",
+			wantLevel:   0,
+			wantEnabled: []LogLevel{LevelWarning},
+		},
+		{
+			name:        "warning",
+			level:       LevelWarning,
+			wantLevel:   0,
+			wantEnabled: []LogLevel{LevelWarning},
+		},
+		{
+			name:        "info",
+			level:       LevelInfo,
+			wantLevel:   2,
+			wantEnabled: []LogLevel{LevelWarning, LevelInfo},
+		},
+		{
+			name:        "debug",
+			level:       LevelDebug,
+			wantLevel:   4,
+			wantEnabled: []LogLevel{LevelWarning, LevelInfo, LevelDebug},
+		},
+		{
+			name:        "trace",
+			level:       LevelTrace,
+			wantLevel:   6,
+			wantEnabled: []LogLevel{LevelWarning, LevelInfo, LevelDebug, LevelTrace},
+		},
+		{
+			name:        "all",
+			level:       LevelAll,
+			wantLevel:   108,
+			wantEnabled: []LogLevel{LevelWarning, LevelInfo, LevelDebug, LevelTrace, LevelAll},
+		},
+		{
+			name:      "invalid level",
+			level:     "panda",
+			wantLevel: originalLogLevel,
+			wantErr:   errInvalidLogLevel.Error(),
+		},
+	}
+	for _, tt := range tests {
+		tt := tt // capture range variable
+		t.Run(tt.name, func(t *testing.T) {
+			defer func() {
+				undoGlobalLogLevelChanges(t, originalLogLevel)
+			}()
+
+			ctx, cancel := context.WithCancel(context.Background())
+			t.Cleanup(cancel)
+			err := ValidateAndSetLogLevelAndFormatGlobally(ctx, LogSpec{Level: tt.level})
+			require.Equal(t, tt.wantErr, errString(err))
+			require.Equal(t, tt.wantLevel, getKlogLevel())
+
+			if tt.wantEnabled != nil {
+				allLevels := []LogLevel{LevelWarning, LevelInfo, LevelDebug, LevelTrace, LevelAll}
+				for _, level := range allLevels {
+					if contains(tt.wantEnabled, level) {
+						require.Truef(t, Enabled(level), "wanted %q to be enabled", level)
+					} else {
+						require.False(t, Enabled(level), "did not want %q to be enabled", level)
+					}
+				}
+			}
+		})
+	}
+
+	require.Equal(t, originalLogLevel, getKlogLevel())
+}
+
+func contains(haystack []LogLevel, needle LogLevel) bool {
+	for _, hay := range haystack {
+		if hay == needle {
+			return true
+		}
+	}
+	return false
+}
+
+func errString(err error) string {
+	if err == nil {
+		return ""
+	}
+
+	return err.Error()
+}
+
+func undoGlobalLogLevelChanges(t *testing.T, originalLogLevel klog.Level) {
+	t.Helper()
+	_, err := logs.GlogSetter(strconv.Itoa(int(originalLogLevel)))
+	require.NoError(t, err)
+}
+
+func getKlogLevel() klog.Level {
+	// hack around klog not exposing a Get method
+	for i := klog.Level(0); i < 256; i++ {
+		if klog.V(i).Enabled() {
+			continue
+		}
+		return i - 1
+	}
+
+	return -1
+}
internal/plog/global.go (new file, 77 lines)
@@ -0,0 +1,77 @@
+// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package plog
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"sync"
+
+	"github.com/go-logr/logr"
+	"go.uber.org/zap"
+	"k8s.io/component-base/logs"
+	"k8s.io/klog/v2"
+)
+
+// nolint: gochecknoglobals
+var (
+	// note that these globals have no locks on purpose - they are expected to be set at init and then again after config parsing.
+	globalLevel  zap.AtomicLevel
+	globalLogger logr.Logger
+	globalFlush  func()
+
+	// used as a temporary storage for a buffer per call of newLogr. see the init function below for more details.
+	sinkMap sync.Map
+)
+
+// nolint: gochecknoinits
+func init() {
+	// make sure we always have a functional global logger
+	globalLevel = zap.NewAtomicLevelAt(0) // log at the 0 verbosity level to start with, i.e. the "always" logs
+	// use json encoding to start with
+	// the context here is just used for test injection and thus can be ignored
+	log, flush, err := newLogr(context.Background(), "json", 0)
+	if err != nil {
+		panic(err) // default logging config must always work
+	}
+	setGlobalLoggers(log, flush)
+
+	// this is a little crazy but zap's builder code does not allow us to directly specify what
+	// writer we want to use as our log sink. to get around this limitation in tests, we use a
+	// global map to temporarily hold the writer (the key is a random string that is generated
+	// per invocation of newLogr). we register a fake "pinniped" scheme so that we can lookup
+	// the writer via pinniped:///<per newLogr invocation random string>.
+	if err := zap.RegisterSink("pinniped", func(u *url.URL) (zap.Sink, error) {
+		value, ok := sinkMap.Load(u.Path)
+		if !ok {
+			return nil, fmt.Errorf("key %q not in global sink", u.Path)
+		}
+		return value.(zap.Sink), nil
+	}); err != nil {
+		panic(err) // custom sink must always work
+	}
+}
+
+// Deprecated: Use New instead. This is meant for old code only.
+// New provides a more ergonomic API and correctly responds to global log config change.
+func Logr() logr.Logger {
+	return globalLogger
+}
+
+func Setup() func() {
+	logs.InitLogs()
+	return func() {
+		logs.FlushLogs()
+		globalFlush()
+	}
+}
+
+// setGlobalLoggers sets the plog and klog global loggers. it is *not* go routine safe.
+func setGlobalLoggers(log logr.Logger, flush func()) {
+	// a contextual logger does its own level based enablement checks, which is true for all of our loggers
+	klog.SetLoggerWithOptions(log, klog.ContextualLogger(true), klog.FlushLogger(flush))
+	globalLogger = log
+	globalFlush = flush
+}
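Sketch (not from the commit) of how code that still needs a logr.Logger can bridge to the new zap-backed global logger via the deprecated Logr accessor, and how the Setup flush hook might be wired; the exact wiring in the repo may differ:

package main

import (
	"github.com/go-logr/logr"

	"go.pinniped.dev/internal/plog"
)

// newThingThatWantsLogr stands in for any dependency that still takes a logr.Logger.
func newThingThatWantsLogr(log logr.Logger) { _ = log }

func main() {
	defer plog.Setup()() // flush buffered log lines on exit

	// nolint: staticcheck // Logr is deprecated in favor of plog.New, kept for old code
	newThingThatWantsLogr(plog.Logr())
}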
(deleted file in internal/plog containing the old KObj and klogLevelForPlogLevel helpers)
@@ -1,32 +0,0 @@
-// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package plog
-
-import (
-	"fmt"
-
-	"k8s.io/klog/v2"
-)
-
-// KObj is (mostly) copied from klog - it is a standard way to represent a metav1.Object in logs.
-func KObj(obj klog.KMetadata) string {
-	return fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())
-}
-
-func klogLevelForPlogLevel(plogLevel LogLevel) klog.Level {
-	switch plogLevel {
-	case LevelWarning:
-		return klogLevelWarning // unset means minimal logs (Error and Warning)
-	case LevelInfo:
-		return klogLevelInfo
-	case LevelDebug:
-		return klogLevelDebug
-	case LevelTrace:
-		return klogLevelTrace
-	case LevelAll:
-		return klogLevelAll + 100 // make all really mean all
-	default:
-		return -1
-	}
-}
@@ -4,18 +4,18 @@
package plog

import (
-	"strconv"
+	"go.uber.org/zap/zapcore"

-	"k8s.io/component-base/logs"
	"k8s.io/klog/v2"
-
-	"go.pinniped.dev/internal/constable"
)

// LogLevel is an enum that controls verbosity of logs.
// Valid values in order of increasing verbosity are leaving it unset, info, debug, trace and all.
type LogLevel string

+func (l LogLevel) Enabled(_ zapcore.Level) bool {
+	return Enabled(l) // this basically says "log if the global plog level is l or greater"
+}
+
const (
	// LevelWarning (i.e. leaving the log level unset) maps to klog log level 0.
	LevelWarning LogLevel = ""
@@ -27,33 +27,40 @@ const (
	LevelTrace LogLevel = "trace"
	// LevelAll maps to klog log level 100 (conceptually it is log level 8).
	LevelAll LogLevel = "all"
-
-	errInvalidLogLevel = constable.Error("invalid log level, valid choices are the empty string, info, debug, trace and all")
)

+var _ zapcore.LevelEnabler = LevelWarning
+
const (
	klogLevelWarning = iota * 2
-	klogLevelInfo
-	klogLevelDebug
-	klogLevelTrace
+	KlogLevelInfo
+	KlogLevelDebug
+	KlogLevelTrace
	klogLevelAll
)

-func ValidateAndSetLogLevelGlobally(level LogLevel) error {
-	klogLevel := klogLevelForPlogLevel(level)
-	if klogLevel < 0 {
-		return errInvalidLogLevel
-	}
-
-	if _, err := logs.GlogSetter(strconv.Itoa(int(klogLevel))); err != nil {
-		panic(err) // programmer error
-	}
-
-	return nil
-}
-
// Enabled returns whether the provided plog level is enabled, i.e., whether print statements at the
// provided level will show up.
func Enabled(level LogLevel) bool {
-	return klog.V(klogLevelForPlogLevel(level)).Enabled()
+	l := klogLevelForPlogLevel(level)
+	// check that both our global level and the klog global level agree that the plog level is enabled
+	// klog levels are inverted when zap handles them
+	return globalLevel.Enabled(zapcore.Level(-l)) && klog.V(l).Enabled()
+}
+
+func klogLevelForPlogLevel(plogLevel LogLevel) klog.Level {
+	switch plogLevel {
+	case LevelWarning:
+		return klogLevelWarning // unset means minimal logs (Error and Warning)
+	case LevelInfo:
+		return KlogLevelInfo
+	case LevelDebug:
+		return KlogLevelDebug
+	case LevelTrace:
+		return KlogLevelTrace
+	case LevelAll:
+		return klogLevelAll + 100 // make all really mean all
+	default:
+		return -1
+	}
}
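To make the inverted level arithmetic above concrete, here is a small illustrative snippet (not part of the commit) that prints how each plog level maps to a klog verbosity (the iota*2 constants) and then to the negated zap level used by globalLevel:

package main

import "fmt"

func main() {
	// plog level -> klog verbosity -> zap level (zapcore.Level(-klogLevel))
	for _, pair := range []struct {
		plogLevel string
		klogLevel int
	}{
		{"warning (unset)", 0},
		{"info", 2},
		{"debug", 4},
		{"trace", 6},
		{"all", 108}, // klogLevelAll (8) + 100, so "all" really means all
	} {
		fmt.Printf("%-16s klog V=%-3d zap level=%d\n", pair.plogLevel, pair.klogLevel, -pair.klogLevel)
	}
}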
(deleted file: the old internal/plog log level test, replaced by config_test.go above)
@@ -1,128 +0,0 @@
-// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package plog
-
-import (
-	"strconv"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	"k8s.io/component-base/logs"
-	"k8s.io/klog/v2"
-)
-
-func TestValidateAndSetLogLevelGlobally(t *testing.T) {
-	originalLogLevel := getKlogLevel()
-	require.GreaterOrEqual(t, int(originalLogLevel), int(klog.Level(0)), "cannot get klog level")
-
-	tests := []struct {
-		name        string
-		level       LogLevel
-		wantLevel   klog.Level
-		wantEnabled []LogLevel
-		wantErr     string
-	}{
-		{
-			name:        "unset",
-			wantLevel:   0,
-			wantEnabled: []LogLevel{LevelWarning},
-		},
-		{
-			name:        "warning",
-			level:       LevelWarning,
-			wantLevel:   0,
-			wantEnabled: []LogLevel{LevelWarning},
-		},
-		{
-			name:        "info",
-			level:       LevelInfo,
-			wantLevel:   2,
-			wantEnabled: []LogLevel{LevelWarning, LevelInfo},
-		},
-		{
-			name:        "debug",
-			level:       LevelDebug,
-			wantLevel:   4,
-			wantEnabled: []LogLevel{LevelWarning, LevelInfo, LevelDebug},
-		},
-		{
-			name:        "trace",
-			level:       LevelTrace,
-			wantLevel:   6,
-			wantEnabled: []LogLevel{LevelWarning, LevelInfo, LevelDebug, LevelTrace},
-		},
-		{
-			name:        "all",
-			level:       LevelAll,
-			wantLevel:   108,
-			wantEnabled: []LogLevel{LevelWarning, LevelInfo, LevelDebug, LevelTrace, LevelAll},
-		},
-		{
-			name:      "invalid level",
-			level:     "panda",
-			wantLevel: originalLogLevel,
-			wantErr:   errInvalidLogLevel.Error(),
-		},
-	}
-	for _, tt := range tests {
-		tt := tt // capture range variable
-		t.Run(tt.name, func(t *testing.T) {
-			defer func() {
-				undoGlobalLogLevelChanges(t, originalLogLevel)
-			}()
-
-			err := ValidateAndSetLogLevelGlobally(tt.level)
-			require.Equal(t, tt.wantErr, errString(err))
-			require.Equal(t, tt.wantLevel, getKlogLevel())
-
-			if tt.wantEnabled != nil {
-				allLevels := []LogLevel{LevelWarning, LevelInfo, LevelDebug, LevelTrace, LevelAll}
-				for _, level := range allLevels {
-					if contains(tt.wantEnabled, level) {
-						require.Truef(t, Enabled(level), "wanted %q to be enabled", level)
-					} else {
-						require.False(t, Enabled(level), "did not want %q to be enabled", level)
-					}
-				}
-			}
-		})
-	}
-
-	require.Equal(t, originalLogLevel, getKlogLevel())
-}
-
-func contains(haystack []LogLevel, needle LogLevel) bool {
-	for _, hay := range haystack {
-		if hay == needle {
-			return true
-		}
-	}
-	return false
-}
-
-func errString(err error) string {
-	if err == nil {
-		return ""
-	}
-
-	return err.Error()
-}
-
-func undoGlobalLogLevelChanges(t *testing.T, originalLogLevel klog.Level) {
-	t.Helper()
-	_, err := logs.GlogSetter(strconv.Itoa(int(originalLogLevel)))
-	require.NoError(t, err)
-}
-
-func getKlogLevel() klog.Level {
-	// hack around klog not exposing a Get method
-	for i := klog.Level(0); i < 256; i++ {
-		if klog.V(i).Enabled() {
-			continue
-		}
-		return i - 1
-	}
-
-	return -1
-}
@ -1,7 +1,7 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package plog implements a thin layer over klog to help enforce pinniped's logging convention.
// Package plog implements a thin layer over logr to help enforce pinniped's logging convention.
// Logs are always structured as a constant message with key and value pairs of related metadata.
//
// The logging levels in order of increasing verbosity are:
@ -26,10 +26,18 @@
// act of desperation to determine why the system is broken.
package plog

import "k8s.io/klog/v2"
import (
"os"

const errorKey = "error"
"github.com/go-logr/logr"
)

const errorKey = "error" // this matches zapr's default for .Error calls (which is asserted via tests)

// Logger implements the plog logging convention described above. The global functions in this package
// such as Info should be used when one does not intend to write tests assertions for specific log messages.
// If test assertions are desired, Logger should be passed in as an input. New should be used as the
// production implementation and TestLogger should be used to write test assertions.
type Logger interface {
Error(msg string, err error, keysAndValues ...interface{})
Warning(msg string, keysAndValues ...interface{})
@ -41,100 +49,155 @@ type Logger interface {
Trace(msg string, keysAndValues ...interface{})
TraceErr(msg string, err error, keysAndValues ...interface{})
All(msg string, keysAndValues ...interface{})
Always(msg string, keysAndValues ...interface{})
WithValues(keysAndValues ...interface{}) Logger
WithName(name string) Logger

// does not include Fatal on purpose because that is not a method you should be using

// for internal and test use only
withDepth(d int) Logger
withLogrMod(mod func(logr.Logger) logr.Logger) Logger
}

// MinLogger is the overlap between Logger and logr.Logger.
type MinLogger interface {
Info(msg string, keysAndValues ...interface{})
}

var _ Logger = pLogger{}
var _, _, _ MinLogger = pLogger{}, logr.Logger{}, Logger(nil)

type pLogger struct {
prefix string
mods []func(logr.Logger) logr.Logger
depth int
}

func New(prefix string) Logger {
func New() Logger {
return &pLogger{
return pLogger{}
depth: 0,
}
prefix: prefix,

func (p pLogger) Error(msg string, err error, keysAndValues ...interface{}) {
p.logr().WithCallDepth(p.depth+1).Error(err, msg, keysAndValues...)
}

func (p pLogger) warningDepth(msg string, depth int, keysAndValues ...interface{}) {
if p.logr().V(klogLevelWarning).Enabled() {
// klog's structured logging has no concept of a warning (i.e. no WarningS function)
// Thus we use info at log level zero as a proxy
// klog's info logs have an I prefix and its warning logs have a W prefix
// Since we lose the W prefix by using InfoS, just add a key to make these easier to find
keysAndValues = append([]interface{}{"warning", true}, keysAndValues...)
p.logr().V(klogLevelWarning).WithCallDepth(depth+1).Info(msg, keysAndValues...)
}
}
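For reference, a warning issued through the new warningDepth path surfaces as an info-level JSON line that carries the extra warning key. A minimal sketch, assuming a caller inside the Pinniped module (the message and flag name are illustrative only):

package example

import "go.pinniped.dev/internal/plog"

func warnAboutDeprecatedFlag() {
    // Emits roughly:
    // {"level":"info","timestamp":"...","caller":"...","message":"deprecated flag used","warning":true,"flag":"--example"}
    plog.Warning("deprecated flag used", "flag", "--example")
}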
func (p *pLogger) Error(msg string, err error, keysAndValues ...interface{}) {
func (p pLogger) Warning(msg string, keysAndValues ...interface{}) {
klog.ErrorSDepth(p.depth+1, err, p.prefix+msg, keysAndValues...)
}

func (p *pLogger) warningDepth(msg string, depth int, keysAndValues ...interface{}) {
// klog's structured logging has no concept of a warning (i.e. no WarningS function)
// Thus we use info at log level zero as a proxy
// klog's info logs have an I prefix and its warning logs have a W prefix
// Since we lose the W prefix by using InfoS, just add a key to make these easier to find
keysAndValues = append([]interface{}{"warning", "true"}, keysAndValues...)
if klog.V(klogLevelWarning).Enabled() {
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
}
}

func (p *pLogger) Warning(msg string, keysAndValues ...interface{}) {
p.warningDepth(msg, p.depth+1, keysAndValues...)
}

// Use WarningErr to issue a Warning message with an error object as part of the message.
func (p pLogger) WarningErr(msg string, err error, keysAndValues ...interface{}) {
func (p *pLogger) WarningErr(msg string, err error, keysAndValues ...interface{}) {
p.warningDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}

func (p *pLogger) infoDepth(msg string, depth int, keysAndValues ...interface{}) {
func (p pLogger) infoDepth(msg string, depth int, keysAndValues ...interface{}) {
if klog.V(klogLevelInfo).Enabled() {
if p.logr().V(KlogLevelInfo).Enabled() {
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
p.logr().V(KlogLevelInfo).WithCallDepth(depth+1).Info(msg, keysAndValues...)
}
}

func (p *pLogger) Info(msg string, keysAndValues ...interface{}) {
func (p pLogger) Info(msg string, keysAndValues ...interface{}) {
p.infoDepth(msg, p.depth+1, keysAndValues...)
}

// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
func (p pLogger) InfoErr(msg string, err error, keysAndValues ...interface{}) {
func (p *pLogger) InfoErr(msg string, err error, keysAndValues ...interface{}) {
p.infoDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}

func (p *pLogger) debugDepth(msg string, depth int, keysAndValues ...interface{}) {
func (p pLogger) debugDepth(msg string, depth int, keysAndValues ...interface{}) {
if klog.V(klogLevelDebug).Enabled() {
if p.logr().V(KlogLevelDebug).Enabled() {
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
p.logr().V(KlogLevelDebug).WithCallDepth(depth+1).Info(msg, keysAndValues...)
}
}

func (p *pLogger) Debug(msg string, keysAndValues ...interface{}) {
func (p pLogger) Debug(msg string, keysAndValues ...interface{}) {
p.debugDepth(msg, p.depth+1, keysAndValues...)
}

// Use DebugErr to issue a Debug message with an error object as part of the message.
func (p pLogger) DebugErr(msg string, err error, keysAndValues ...interface{}) {
func (p *pLogger) DebugErr(msg string, err error, keysAndValues ...interface{}) {
p.debugDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}

func (p *pLogger) traceDepth(msg string, depth int, keysAndValues ...interface{}) {
func (p pLogger) traceDepth(msg string, depth int, keysAndValues ...interface{}) {
if klog.V(klogLevelTrace).Enabled() {
if p.logr().V(KlogLevelTrace).Enabled() {
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
p.logr().V(KlogLevelTrace).WithCallDepth(depth+1).Info(msg, keysAndValues...)
}
}

func (p *pLogger) Trace(msg string, keysAndValues ...interface{}) {
func (p pLogger) Trace(msg string, keysAndValues ...interface{}) {
p.traceDepth(msg, p.depth+1, keysAndValues...)
}

// Use TraceErr to issue a Trace message with an error object as part of the message.
func (p pLogger) TraceErr(msg string, err error, keysAndValues ...interface{}) {
func (p *pLogger) TraceErr(msg string, err error, keysAndValues ...interface{}) {
p.traceDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}

func (p *pLogger) All(msg string, keysAndValues ...interface{}) {
func (p pLogger) All(msg string, keysAndValues ...interface{}) {
if klog.V(klogLevelAll).Enabled() {
if p.logr().V(klogLevelAll).Enabled() {
klog.InfoSDepth(p.depth+1, p.prefix+msg, keysAndValues...)
p.logr().V(klogLevelAll).WithCallDepth(p.depth+1).Info(msg, keysAndValues...)
}
}

var logger Logger = &pLogger{ //nolint:gochecknoglobals
func (p pLogger) Always(msg string, keysAndValues ...interface{}) {
depth: 1,
p.logr().WithCallDepth(p.depth+1).Info(msg, keysAndValues...)
}

// Use Error to log an unexpected system error.
func (p pLogger) WithValues(keysAndValues ...interface{}) Logger {
if len(keysAndValues) == 0 {
return p
}

return p.withLogrMod(func(l logr.Logger) logr.Logger {
return l.WithValues(keysAndValues...)
})
}

func (p pLogger) WithName(name string) Logger {
if len(name) == 0 {
return p
}

return p.withLogrMod(func(l logr.Logger) logr.Logger {
return l.WithName(name)
})
}

func (p pLogger) withDepth(d int) Logger {
out := p
out.depth += d // out is a copy so this does not mutate p
return out
}

func (p pLogger) withLogrMod(mod func(logr.Logger) logr.Logger) Logger {
out := p // make a copy and carefully avoid mutating the mods slice
mods := make([]func(logr.Logger) logr.Logger, 0, len(out.mods)+1)
mods = append(mods, out.mods...)
mods = append(mods, mod)
out.mods = mods
return out
}

func (p pLogger) logr() logr.Logger {
l := Logr() // grab the current global logger and its current config
for _, mod := range p.mods {
mod := mod
l = mod(l) // and then update it with all modifications
}
return l // this logger is guaranteed to have the latest config and all modifications
}

var logger = New().withDepth(1) //nolint:gochecknoglobals

func Error(msg string, err error, keysAndValues ...interface{}) {
logger.Error(msg, err, keysAndValues...)
}
@ -143,7 +206,6 @@ func Warning(msg string, keysAndValues ...interface{}) {
logger.Warning(msg, keysAndValues...)
}

// Use WarningErr to issue a Warning message with an error object as part of the message.
func WarningErr(msg string, err error, keysAndValues ...interface{}) {
logger.WarningErr(msg, err, keysAndValues...)
}
@ -152,7 +214,6 @@ func Info(msg string, keysAndValues ...interface{}) {
logger.Info(msg, keysAndValues...)
}

// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
func InfoErr(msg string, err error, keysAndValues ...interface{}) {
logger.InfoErr(msg, err, keysAndValues...)
}
@ -161,7 +222,6 @@ func Debug(msg string, keysAndValues ...interface{}) {
logger.Debug(msg, keysAndValues...)
}

// Use DebugErr to issue a Debug message with an error object as part of the message.
func DebugErr(msg string, err error, keysAndValues ...interface{}) {
logger.DebugErr(msg, err, keysAndValues...)
}
@ -170,7 +230,6 @@ func Trace(msg string, keysAndValues ...interface{}) {
logger.Trace(msg, keysAndValues...)
}

// Use TraceErr to issue a Trace message with an error object as part of the message.
func TraceErr(msg string, err error, keysAndValues ...interface{}) {
logger.TraceErr(msg, err, keysAndValues...)
}
@ -178,3 +237,23 @@ func TraceErr(msg string, err error, keysAndValues ...interface{}) {
func All(msg string, keysAndValues ...interface{}) {
logger.All(msg, keysAndValues...)
}

func Always(msg string, keysAndValues ...interface{}) {
logger.Always(msg, keysAndValues...)
}

func WithValues(keysAndValues ...interface{}) Logger {
// this looks weird but it is the same as New().WithValues(keysAndValues...) because it returns a new logger rooted at the call site
return logger.withDepth(-1).WithValues(keysAndValues...)
}

func WithName(name string) Logger {
// this looks weird but it is the same as New().WithName(name) because it returns a new logger rooted at the call site
return logger.withDepth(-1).WithName(name)
}

func Fatal(err error, keysAndValues ...interface{}) {
logger.Error("unrecoverable error encountered", err, keysAndValues...)
globalFlush()
os.Exit(1)
}
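Taken together, the new Logger surface replaces the old prefix-string constructor with WithName/WithValues chaining and adds Fatal as the only process-exiting entry point. A minimal usage sketch, assuming code inside the Pinniped module (the controller name, keys, and error are illustrative only):

package example

import (
    "errors"

    "go.pinniped.dev/internal/plog"
)

func runExample() {
    // A named logger scoped with key/value pairs; both the name and the keys are made up.
    logger := plog.New().WithName("example-controller").WithValues("syncID", 42)

    logger.Info("starting sync")                // emitted when the info level (or higher) is enabled
    logger.Debug("fetched objects", "count", 3) // emitted only when the debug level (or higher) is enabled

    if err := errors.New("example failure"); err != nil {
        // Fatal logs "unrecoverable error encountered", flushes, and exits the process.
        plog.Fatal(err)
    }
}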
366 internal/plog/plog_test.go Normal file
@ -0,0 +1,366 @@
// Copyright 2022 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package plog

import (
"bytes"
"fmt"
"strings"
"testing"
"time"

"github.com/stretchr/testify/require"
)

func TestPlog(t *testing.T) {
tests := []struct {
name string
run func(Logger)
want string
}{
{
name: "basic",
run: testAllPlogMethods,
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"always","panda":2}
`,
},
{
name: "with values",
run: func(l Logger) {
testAllPlogMethods(l.WithValues("hi", 42))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"e","hi":42,"panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"w","hi":42,"warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"we","hi":42,"warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"i","hi":42,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"ie","hi":42,"error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"d","hi":42,"panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"de","hi":42,"error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"t","hi":42,"panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"te","hi":42,"error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"all","hi":42,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"always","hi":42,"panda":2}
`,
},
{
name: "with values conflict", // duplicate key is included twice ...
run: func(l Logger) {
testAllPlogMethods(l.WithValues("panda", false))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"e","panda":false,"panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"w","panda":false,"warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"we","panda":false,"warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"i","panda":false,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"ie","panda":false,"error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"d","panda":false,"panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"de","panda":false,"error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"t","panda":false,"panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"te","panda":false,"error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"all","panda":false,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"always","panda":false,"panda":2}
`,
},
{
name: "with values nested",
run: func(l Logger) {
testAllPlogMethods(l.WithValues("hi", 42).WithValues("not", time.Hour))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"e","hi":42,"not":"1h0m0s","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"w","hi":42,"not":"1h0m0s","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"we","hi":42,"not":"1h0m0s","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"i","hi":42,"not":"1h0m0s","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"ie","hi":42,"not":"1h0m0s","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"d","hi":42,"not":"1h0m0s","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"de","hi":42,"not":"1h0m0s","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"t","hi":42,"not":"1h0m0s","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"te","hi":42,"not":"1h0m0s","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"all","hi":42,"not":"1h0m0s","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"always","hi":42,"not":"1h0m0s","panda":2}
`,
},
{
name: "with name",
run: func(l Logger) {
testAllPlogMethods(l.WithName("yoyo"))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"always","panda":2}
`,
},
{
name: "with name nested",
run: func(l Logger) {
testAllPlogMethods(l.WithName("yoyo").WithName("gold"))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","logger":"yoyo.gold","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"always","panda":2}
`,
},
{
name: "depth 3",
run: func(l Logger) {
testAllPlogMethods(l.withDepth(3))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"testing/testing.go:<line>$testing.tRunner","message":"always","panda":2}
`,
},
{
name: "depth 2",
run: func(l Logger) {
testAllPlogMethods(l.withDepth(2))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func15","message":"always","panda":2}
`,
},
{
name: "depth 1",
run: func(l Logger) {
testAllPlogMethods(l.withDepth(1))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func8","message":"always","panda":2}
`,
},
{
name: "depth 0",
run: func(l Logger) {
testAllPlogMethods(l.withDepth(0))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.testAllPlogMethods","message":"always","panda":2}
`,
},
{
name: "depth -1",
run: func(l Logger) {
testAllPlogMethods(l.withDepth(-1))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Error","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Warning","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.WarningErr","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Info","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.InfoErr","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Debug","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.DebugErr","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Trace","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.TraceErr","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.All","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Always","message":"always","panda":2}
`,
},
{
name: "depth -2",
run: func(l Logger) {
testAllPlogMethods(l.withDepth(-2))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Error","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.warningDepth","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.warningDepth","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.infoDepth","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.infoDepth","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.debugDepth","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.debugDepth","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.traceDepth","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.traceDepth","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"always","panda":2}
`,
},
{
name: "depth -3",
run: func(l Logger) {
testAllPlogMethods(l.withDepth(-3))
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"zapr@v1.2.3/zapr.go:<line>$zapr.(*zapLogger).Error","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"logr@v1.2.3/logr.go:<line>$logr.Logger.Info","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"zapr@v1.2.3/zapr.go:<line>$zapr.(*zapLogger).Info","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"zapr@v1.2.3/zapr.go:<line>$zapr.(*zapLogger).Info","message":"always","panda":2}
`,
},
{
name: "closure",
run: func(l Logger) {
func() {
func() {
testErr := fmt.Errorf("some err")

l.Error("e", testErr, "panda", 2)
l.Warning("w", "panda", 2)
l.WarningErr("we", testErr, "panda", 2)
l.Info("i", "panda", 2)
l.InfoErr("ie", testErr, "panda", 2)
l.Debug("d", "panda", 2)
l.DebugErr("de", testErr, "panda", 2)
l.Trace("t", "panda", 2)
l.TraceErr("te", testErr, "panda", 2)
l.All("all", "panda", 2)
l.Always("always", "panda", 2)
}()
}()
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog_test.go:<line>$plog.TestPlog.func13.1.1","message":"always","panda":2}
`,
},
{
name: "closure depth -1",
run: func(l Logger) {
func() {
func() {
testErr := fmt.Errorf("some err")

l = l.withDepth(-1)
l.Error("e", testErr, "panda", 2)
l.Warning("w", "panda", 2)
l.WarningErr("we", testErr, "panda", 2)
l.Info("i", "panda", 2)
l.InfoErr("ie", testErr, "panda", 2)
l.Debug("d", "panda", 2)
l.DebugErr("de", testErr, "panda", 2)
l.Trace("t", "panda", 2)
l.TraceErr("te", testErr, "panda", 2)
l.All("all", "panda", 2)
l.Always("always", "panda", 2)
}()
}()
},
want: `
{"level":"error","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Error","message":"e","panda":2,"error":"some err"}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Warning","message":"w","warning":true,"panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.WarningErr","message":"we","warning":true,"error":"some err","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Info","message":"i","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.InfoErr","message":"ie","error":"some err","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Debug","message":"d","panda":2}
{"level":"debug","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.DebugErr","message":"de","error":"some err","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Trace","message":"t","panda":2}
{"level":"trace","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.TraceErr","message":"te","error":"some err","panda":2}
{"level":"all","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.All","message":"all","panda":2}
{"level":"info","timestamp":"2099-08-08T13:57:36.123456Z","caller":"plog/plog.go:<line>$plog.pLogger.Always","message":"always","panda":2}
`,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()

var log bytes.Buffer
tt.run(TestLogger(t, &log))

require.Equal(t, strings.TrimSpace(tt.want), strings.TrimSpace(log.String()))
})
}
}

func testAllPlogMethods(l Logger) {
testErr := fmt.Errorf("some err")

l.Error("e", testErr, "panda", 2)
l.Warning("w", "panda", 2)
l.WarningErr("we", testErr, "panda", 2)
l.Info("i", "panda", 2)
l.InfoErr("ie", testErr, "panda", 2)
l.Debug("d", "panda", 2)
l.DebugErr("de", testErr, "panda", 2)
l.Trace("t", "panda", 2)
l.TraceErr("te", testErr, "panda", 2)
l.All("all", "panda", 2)
l.Always("always", "panda", 2)
}
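Outside of the table-driven test above, the same TestLogger helper can back a focused assertion on a single line. A minimal sketch in package plog (the message and key are illustrative; the deterministic timestamp and "<line>" caller token come from the test encoder defined in testing.go below):

package plog

import (
    "bytes"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestSingleLogLine(t *testing.T) {
    var buf bytes.Buffer
    logger := TestLogger(t, &buf)

    logger.Info("hello", "panda", 2)

    // The JSON encoder writes the constant message followed by the key/value pairs.
    require.Contains(t, buf.String(), `"message":"hello","panda":2`)
}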
120 internal/plog/testing.go Normal file
@ -0,0 +1,120 @@
// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package plog

import (
"context"
"io"
"math"
"strings"
"testing"
"time"

"github.com/go-logr/logr"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"k8s.io/utils/clock"
clocktesting "k8s.io/utils/clock/testing"
)

// contextKey type is unexported to prevent collisions.
type contextKey int

const zapOverridesKey contextKey = iota

func TestZapOverrides(ctx context.Context, t *testing.T, w io.Writer, f func(*zap.Config), opts ...zap.Option) context.Context {
t.Helper() // discourage use outside of tests

overrides := &testOverrides{
t: t,
w: w,
f: f,
opts: opts,
}
return context.WithValue(ctx, zapOverridesKey, overrides)
}

func TestLogger(t *testing.T, w io.Writer) Logger {
t.Helper()

return New().withLogrMod(func(l logr.Logger) logr.Logger {
return l.WithSink(TestZapr(t, w).GetSink())
})
}

func TestZapr(t *testing.T, w io.Writer) logr.Logger {
t.Helper()

now, err := time.Parse(time.RFC3339Nano, "2099-08-08T13:57:36.123456789Z")
require.NoError(t, err)

ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)

ctx = TestZapOverrides(ctx, t, w,
func(config *zap.Config) {
config.Level = zap.NewAtomicLevelAt(math.MinInt8) // log everything during tests

// make test assertions less painful to write while keeping them as close to the real thing as possible
config.EncoderConfig.EncodeCaller = func(caller zapcore.EntryCaller, enc zapcore.PrimitiveArrayEncoder) {
trimmed := caller.TrimmedPath()
if idx := strings.LastIndexByte(trimmed, ':'); idx != -1 {
trimmed = trimmed[:idx+1] + "<line>"
}
enc.AppendString(trimmed + funcEncoder(caller))
}
},
zap.WithClock(ZapClock(clocktesting.NewFakeClock(now))), // have the clock be static during tests
zap.AddStacktrace(nopLevelEnabler{}), // do not log stacktraces
)

// there is no buffering so we can ignore flush
zl, _, err := newLogr(ctx, "json", 0)
require.NoError(t, err)

return zl
}

var _ zapcore.Clock = &clockAdapter{}

type clockAdapter struct {
clock clock.Clock
}

func (c *clockAdapter) Now() time.Time {
return c.clock.Now()
}

func (c *clockAdapter) NewTicker(duration time.Duration) *time.Ticker {
return &time.Ticker{C: c.clock.Tick(duration)}
}

func ZapClock(c clock.Clock) zapcore.Clock {
return &clockAdapter{clock: c}
}

var _ zap.Sink = nopCloserSink{}

type nopCloserSink struct{ zapcore.WriteSyncer }

func (nopCloserSink) Close() error { return nil }

// newSink returns a wrapper around the input writer that is safe for concurrent use.
func newSink(w io.Writer) zap.Sink {
return nopCloserSink{WriteSyncer: zapcore.Lock(zapcore.AddSync(w))}
}

var _ zapcore.LevelEnabler = nopLevelEnabler{}

type nopLevelEnabler struct{}

func (nopLevelEnabler) Enabled(_ zapcore.Level) bool { return false }

type testOverrides struct {
t *testing.T
w io.Writer
f func(*zap.Config)
opts []zap.Option
}
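The TestZapOverrides hook above is what lets a test capture production-style output: TestZapr builds a JSON logr.Logger whose sink, clock, and caller encoding are overridden through the context, so its output is deterministic. A minimal capture sketch in package plog (the message and key are illustrative only):

package plog

import (
    "bytes"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestCaptureJSONLine(t *testing.T) {
    var buf bytes.Buffer

    // TestZapr logs at every level and uses a fixed clock, so this always emits one JSON line.
    zl := TestZapr(t, &buf)
    zl.Info("captured line", "panda", 2)

    require.Contains(t, buf.String(), `"message":"captured line","panda":2`)
}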
187
internal/plog/zap.go
Normal file
187
internal/plog/zap.go
Normal file
@ -0,0 +1,187 @@
// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package plog

import (
	"context"
	"encoding/base64"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/go-logr/logr"
	"github.com/go-logr/zapr"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/duration"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/klog/v2"
	"k8s.io/klog/v2/textlogger"
)

func newLogr(ctx context.Context, encoding string, klogLevel klog.Level) (logr.Logger, func(), error) {
	if encoding == "text" {
		var w io.Writer = os.Stderr
		flush := func() { _ = os.Stderr.Sync() }

		// allow tests to override klog config (but cheat and re-use the zap override key)
		if overrides, ok := ctx.Value(zapOverridesKey).(*testOverrides); ok {
			if overrides.w != nil {
				w = newSink(overrides.w) // make sure the value is safe for concurrent use
				flush = func() {}
			}
		}

		return textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(int(klogLevel)), textlogger.Output(w))), flush, nil
	}

	path := "stderr" // this is how zap refers to os.Stderr
	f := func(config *zap.Config) {
		if encoding == "console" {
			config.EncoderConfig.LevelKey = zapcore.OmitKey
			config.EncoderConfig.EncodeCaller = zapcore.ShortCallerEncoder
			config.EncoderConfig.EncodeTime = humanTimeEncoder
			config.EncoderConfig.EncodeDuration = humanDurationEncoder
		}
	}
	var opts []zap.Option

	// allow tests to override zap config
	if overrides, ok := ctx.Value(zapOverridesKey).(*testOverrides); ok {
		if overrides.w != nil {
			// use a per invocation random string as the key into the global map
			testKey := "/" + base64.RawURLEncoding.EncodeToString([]byte(rand.String(32)))

			// tell zap to use our custom sink registry to find the writer
			path = "pinniped://" + testKey

			// the registry may be called multiple times so make sure the value is safe for concurrent use
			sink := newSink(overrides.w)

			// store the test's buffer where we can find it globally
			actual, loaded := sinkMap.LoadOrStore(testKey, sink)
			require.False(overrides.t, loaded)
			require.Equal(overrides.t, sink, actual)

			defer func() {
				// delete buffer from the global map to prevent a memory leak
				value, loaded := sinkMap.LoadAndDelete(testKey)
				require.True(overrides.t, loaded)
				require.Equal(overrides.t, sink, value)
			}()
		}
		if overrides.f != nil {
			f = overrides.f
		}
		if overrides.opts != nil {
			opts = overrides.opts
		}
	}

	// when using the trace or all log levels, an error log will contain the full stack.
	// this is too noisy for regular use because things like leader election conflicts
	// result in transient errors and we do not want all of that noise in the logs.
	// this check is performed dynamically on the global log level.
	return newZapr(globalLevel, LevelTrace, encoding, path, f, opts...)
}

func newZapr(level zap.AtomicLevel, addStack zapcore.LevelEnabler, encoding, path string, f func(config *zap.Config), opts ...zap.Option) (logr.Logger, func(), error) {
	opts = append([]zap.Option{zap.AddStacktrace(addStack)}, opts...)

	config := zap.Config{
		Level:             level,
		Development:       false,
		DisableCaller:     false,
		DisableStacktrace: true, // handled via the AddStacktrace call above
		Sampling:          nil,  // keep all logs for now
		Encoding:          encoding,
		EncoderConfig: zapcore.EncoderConfig{
			MessageKey:     "message",
			LevelKey:       "level",
			TimeKey:        "timestamp",
			NameKey:        "logger",
			CallerKey:      "caller",
			FunctionKey:    zapcore.OmitKey, // included in caller
			StacktraceKey:  "stacktrace",
			SkipLineEnding: false,
			LineEnding:     zapcore.DefaultLineEnding,
			EncodeLevel:    levelEncoder,
			// human-readable and machine parsable with microsecond precision (same as klog, kube audit event, etc)
			EncodeTime:          zapcore.TimeEncoderOfLayout(metav1.RFC3339Micro),
			EncodeDuration:      zapcore.StringDurationEncoder,
			EncodeCaller:        callerEncoder,
			EncodeName:          nil,
			NewReflectedEncoder: nil,
			ConsoleSeparator:    " ",
		},
		OutputPaths:      []string{path},
		ErrorOutputPaths: []string{path},
		InitialFields:    nil,
	}

	f(&config)

	log, err := config.Build(opts...)
	if err != nil {
		return logr.Logger{}, nil, fmt.Errorf("failed to build zap logger: %w", err)
	}

	return zapr.NewLogger(log), func() { _ = log.Sync() }, nil
}

func levelEncoder(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
	plogLevel := zapLevelToPlogLevel(l)

	if len(plogLevel) == 0 {
		return // this tells zap that it should handle encoding the level itself because we do not know the mapping
	}

	enc.AppendString(string(plogLevel))
}

func zapLevelToPlogLevel(l zapcore.Level) LogLevel {
	if l > 0 {
		// best effort mapping, the zap levels do not really translate to klog
		// but this is correct for "error" level which is all we need for logr
		return LogLevel(l.String())
	}

	// klog levels are inverted when zap handles them
	switch {
	case -l >= klogLevelAll:
		return LevelAll
	case -l >= KlogLevelTrace:
		return LevelTrace
	case -l >= KlogLevelDebug:
		return LevelDebug
	case -l >= KlogLevelInfo:
		return LevelInfo
	default:
		return "" // warning is handled via a custom key since klog level 0 is ambiguous
	}
}

func callerEncoder(caller zapcore.EntryCaller, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString(caller.String() + funcEncoder(caller))
}

func funcEncoder(caller zapcore.EntryCaller) string {
	funcName := caller.Function
	if idx := strings.LastIndexByte(funcName, '/'); idx != -1 {
		funcName = funcName[idx+1:] // keep everything after the last /
	}
	return "$" + funcName
}

func humanDurationEncoder(d time.Duration, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString(duration.HumanDuration(d))
}

func humanTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString(t.Local().Format(time.RFC1123))
}
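newZapr above is the heart of the change: it builds a zap.Config with a JSON (or console) encoder and hands the resulting *zap.Logger to zapr, so the rest of the codebase can keep using the logr interface. A minimal sketch of the same wiring, with hard-coded values standing in for the plog options and custom encoders above (not the project's actual plog.Setup path):

package main

import (
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionConfig() // JSON encoding, writes to stderr
	cfg.EncoderConfig.MessageKey = "message"
	cfg.EncoderConfig.TimeKey = "timestamp"
	// zapr maps logr's V(n) to zap level -n, so allowing level -4 roughly
	// corresponds to a klog-style debug verbosity (compare zapLevelToPlogLevel above).
	cfg.Level = zap.NewAtomicLevelAt(zapcore.Level(-4))

	zl, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer func() { _ = zl.Sync() }()

	log := zapr.NewLogger(zl)
	log.Info("always logged")          // zap level 0
	log.V(4).Info("debug-ish message") // zap level -4, enabled above
	log.V(6).Info("trace-ish message") // zap level -6, dropped
}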
@ -71,8 +71,8 @@ func TestCreate(t *testing.T) {
		it.Before(func() {
			r = require.New(t)
			ctrl = gomock.NewController(t)
			logger = testutil.NewTranscriptLogger(t)
			logger = testutil.NewTranscriptLogger(t) // nolint: staticcheck // old test with lots of log statements
			klog.SetLogger(logr.New(logger)) // this is unfortunately a global logger, so can't run these tests in parallel :(
		})

		it.After(func() {
@ -22,14 +22,12 @@ import (
	"github.com/joshlf/go-acl"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apimachineryversion "k8s.io/apimachinery/pkg/version"
	genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/version"
	"k8s.io/client-go/rest"
	"k8s.io/component-base/logs"
	"k8s.io/klog/v2"
	"k8s.io/klog/v2/klogr"
	"k8s.io/utils/clock"

	configv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1"
@ -272,7 +270,7 @@ func prepareControllers(
			pinnipedClient,
			pinnipedInformers.IDP().V1alpha1().OIDCIdentityProviders(),
			secretInformer,
			klogr.New(),
			plog.Logr(), // nolint: staticcheck // old controller with lots of log statements
			controllerlib.WithInformer,
		),
		singletonWorker).
@ -315,7 +313,7 @@ func startControllers(ctx context.Context, shutdown *sync.WaitGroup, buildContro
}

//nolint:funlen
func runSupervisor(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
func runSupervisor(ctx context.Context, podInfo *downward.PodInfo, cfg *supervisor.Config) error {
	serverInstallationNamespace := podInfo.Namespace

	dref, supervisorDeployment, supervisorPod, err := deploymentref.New(podInfo)
@ -389,7 +387,6 @@ func runSupervisor(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
		leaderElector,
	)

	ctx := signalCtx()
	shutdown := &sync.WaitGroup{}

	if err := startControllers(ctx, shutdown, buildControllersFunc); err != nil {
@ -504,12 +501,14 @@ func maybeSetupUnixPerms(endpoint *supervisor.Endpoint, pod *corev1.Pod) func()
	}
}

func main() error { // return an error instead of klog.Fatal to allow defer statements to run
func main() error { // return an error instead of plog.Fatal to allow defer statements to run
	logs.InitLogs()
	defer plog.Setup()()
	defer logs.FlushLogs()

	klog.Infof("Running %s at %#v", rest.DefaultKubernetesUserAgent(), version.Get())
	klog.Infof("Command-line arguments were: %s %s %s", os.Args[0], os.Args[1], os.Args[2])
	plog.Always("Running supervisor",
		"user-agent", rest.DefaultKubernetesUserAgent(),
		"version", versionInfo(version.Get()),
		"arguments", os.Args,
	)

	// Discover in which namespace we are installed.
	podInfo, err := downward.Load(os.Args[1])
@ -517,17 +516,21 @@ func main() error { // return an error instead of klog.Fatal to allow defer stat
		return fmt.Errorf("could not read pod metadata: %w", err)
	}

	ctx := signalCtx()

	// Read the server config file.
	cfg, err := supervisor.FromPath(os.Args[2])
	cfg, err := supervisor.FromPath(ctx, os.Args[2])
	if err != nil {
		return fmt.Errorf("could not load config: %w", err)
	}

	return runSupervisor(podInfo, cfg)
	return runSupervisor(ctx, podInfo, cfg)
}

func Main() {
	if err := main(); err != nil {
		klog.Fatal(err)
		plog.Fatal(err)
	}
}

type versionInfo apimachineryversion.Info // hide .String() method from plog
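The versionInfo declaration works because a newly defined Go type does not inherit the methods of its underlying type: Info's String() method disappears, so a structured logger falls back to encoding the individual version fields instead of the pre-formatted string. A small, self-contained illustration of that Go behavior (the names here are made up for the example, not taken from the codebase):

package main

import "fmt"

type Info struct{ Major, Minor string }

func (i Info) String() string { return i.Major + "." + i.Minor }

type versionInfo Info // same fields, but the String() method is not carried over

func main() {
	v := Info{Major: "1", Minor: "24"}

	_, ok := any(v).(fmt.Stringer)
	fmt.Println(ok) // true: a Stringer-aware encoder would log "1.24"

	_, ok = any(versionInfo(v)).(fmt.Stringer)
	fmt.Println(ok) // false: a reflection-based encoder logs the fields instead
}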
@ -24,14 +24,14 @@ type Logger struct {
	buffer syncBuffer
}

// New returns a new test Logger. Use this for all new tests.
// Deprecated: Use plog.TestLogger or plog.TestZapr instead. This is meant for old tests only.
func New(t *testing.T) *Logger {
	res := Logger{t: t}
	res.Logger = stdr.New(log.New(&res.buffer, "", 0))
	return &res
}

// Deprecated: NewLegacy returns a new test Logger. Use this for old tests if necessary.
// Deprecated: Use plog.TestLogger or plog.TestZapr instead. This is meant for old tests only.
func NewLegacy(t *testing.T) *Logger {
	res := New(t)
	res.Logger = newStdLogger(log.New(&res.buffer, "", 0))
@ -24,6 +24,7 @@ type TranscriptLogMessage struct {
	Message string
}

// Deprecated: Use plog.TestLogger or plog.TestZapr instead. This is meant for old tests only.
func NewTranscriptLogger(t *testing.T) *TranscriptLogger {
	return &TranscriptLogger{t: t}
}
@ -411,13 +411,13 @@ func (p *ProviderConfig) maybeFetchUserInfo(ctx context.Context, tok *oauth2.Tok
func maybeLogClaims(msg, name string, claims map[string]interface{}) {
	if plog.Enabled(plog.LevelAll) { // log keys and values at all level
		data, _ := json.Marshal(claims) // nothing we can do if it fails, but it really never should
		plog.Info(msg, "providerName", name, "claims", string(data))
		plog.All(msg, "providerName", name, "claims", string(data))
		return
	}

	if plog.Enabled(plog.LevelDebug) { // log keys at debug level
		keys := sets.StringKeySet(claims).List() // note: this is only safe because the compiler asserts that claims is a map[string]<anything>
		plog.Info(msg, "providerName", name, "keys", keys)
		plog.Debug(msg, "providerName", name, "keys", keys)
		return
	}
}
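The point of the two plog.Enabled checks above is to skip the potentially expensive json.Marshal and key extraction entirely unless the corresponding verbosity is turned on. The same gating pattern, expressed against a plain logr.Logger for comparison (the level number and function name are illustrative only, not part of the codebase):

package example

import (
	"encoding/json"

	"github.com/go-logr/logr"
)

// logClaimsIfVerbose only pays for the JSON marshaling when the verbose
// level is actually enabled on the supplied logger.
func logClaimsIfVerbose(log logr.Logger, name string, claims map[string]interface{}) {
	if v := log.V(8); v.Enabled() {
		data, _ := json.Marshal(claims) // best effort, matching the code above
		v.Info("found claims", "providerName", name, "claims", string(data))
	}
}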
@ -33,6 +33,7 @@ import (
	"go.pinniped.dev/internal/httputil/securityheader"
	"go.pinniped.dev/internal/net/phttp"
	"go.pinniped.dev/internal/oidc/provider"
	"go.pinniped.dev/internal/plog"
	"go.pinniped.dev/internal/upstreamoidc"
	"go.pinniped.dev/pkg/oidcclient/nonce"
	"go.pinniped.dev/pkg/oidcclient/oidctypes"
@ -63,8 +64,6 @@ const (
	defaultPasswordEnvVarName = "PINNIPED_PASSWORD" //nolint:gosec // this is not a credential

	httpLocationHeaderName = "Location"

	debugLogLevel = 4
)

// stdin returns the file descriptor for stdin as an int.
@ -356,7 +355,7 @@ func (h *handlerState) baseLogin() (*oidctypes.Token, error) {
	// If the ID token is still valid for a bit, return it immediately and skip the rest of the flow.
	cached := h.cache.GetToken(cacheKey)
	if cached != nil && cached.IDToken != nil && time.Until(cached.IDToken.Expiry.Time) > minIDTokenValidity {
		h.logger.V(debugLogLevel).Info("Pinniped: Found unexpired cached token.")
		h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Found unexpired cached token.")
		return cached, nil
	}
@ -520,7 +519,7 @@ func (h *handlerState) getUsernameAndPassword() (string, string, error) {
			return "", "", fmt.Errorf("error prompting for username: %w", err)
		}
	} else {
		h.logger.V(debugLogLevel).Info("Pinniped: Read username from environment variable", "name", defaultUsernameEnvVarName)
		h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Read username from environment variable", "name", defaultUsernameEnvVarName)
	}

	password := h.getEnv(defaultPasswordEnvVarName)
@ -530,7 +529,7 @@ func (h *handlerState) getUsernameAndPassword() (string, string, error) {
			return "", "", fmt.Errorf("error prompting for password: %w", err)
		}
	} else {
		h.logger.V(debugLogLevel).Info("Pinniped: Read password from environment variable", "name", defaultPasswordEnvVarName)
		h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Read password from environment variable", "name", defaultPasswordEnvVarName)
	}

	return username, password, nil
@ -542,7 +541,7 @@ func (h *handlerState) webBrowserBasedAuth(authorizeOptions *[]oauth2.AuthCodeOp
	// Attempt to open a local TCP listener, logging but otherwise ignoring any error.
	listener, err := h.listen("tcp", h.listenAddr)
	if err != nil {
		h.logger.V(debugLogLevel).Error(err, "could not open callback listener")
		h.logger.V(plog.KlogLevelDebug).Error(err, "could not open callback listener")
	}

	// If the listener failed to start and stdin is not a TTY, then we have no hope of succeeding,
@ -578,7 +577,7 @@ func (h *handlerState) webBrowserBasedAuth(authorizeOptions *[]oauth2.AuthCodeOp
	// Open the authorize URL in the users browser, logging but otherwise ignoring any error.
	if err := h.openURL(authorizeURL); err != nil {
		h.logger.V(debugLogLevel).Error(err, "could not open browser")
		h.logger.V(plog.KlogLevelDebug).Error(err, "could not open browser")
	}

	// Prompt the user to visit the authorize URL, and to paste a manually-copied auth code (if possible).
@ -709,7 +708,7 @@ func (h *handlerState) initOIDCDiscovery() error {
		return err
	}

	h.logger.V(debugLogLevel).Info("Pinniped: Performing OIDC discovery", "issuer", h.issuer)
	h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Performing OIDC discovery", "issuer", h.issuer)
	var err error
	h.provider, err = oidc.NewProvider(h.ctx, h.issuer)
	if err != nil {
@ -767,7 +766,7 @@ func stringSliceContains(slice []string, s string) bool {
}

func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidctypes.Token, error) {
	h.logger.V(debugLogLevel).Info("Pinniped: Performing RFC8693 token exchange", "requestedAudience", h.requestedAudience)
	h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Performing RFC8693 token exchange", "requestedAudience", h.requestedAudience)
	// Perform OIDC discovery. This may have already been performed if there was not a cached base token.
	if err := h.initOIDCDiscovery(); err != nil {
		return nil, err
@ -838,13 +837,13 @@ func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidcty
}

func (h *handlerState) handleRefresh(ctx context.Context, refreshToken *oidctypes.RefreshToken) (*oidctypes.Token, error) {
	h.logger.V(debugLogLevel).Info("Pinniped: Refreshing cached token.")
	h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Refreshing cached token.")
	upstreamOIDCIdentityProvider := h.getProvider(h.oauth2Config, h.provider, h.httpClient)

	refreshed, err := upstreamOIDCIdentityProvider.PerformRefresh(ctx, refreshToken.Token)
	if err != nil {
		// Ignore errors during refresh, but return nil which will trigger the full login flow.
		h.logger.V(debugLogLevel).Info("Pinniped: Refresh failed.", "error", err.Error())
		h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Refresh failed.", "error", err.Error())
		return nil, nil
	}
|
	if h.useFormPost { // nolint:nestif
		// Return HTTP 405 for anything that's not a POST or an OPTIONS request.
		if r.Method != http.MethodPost && r.Method != http.MethodOptions {
			h.logger.V(debugLogLevel).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method)
			h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method)
			w.WriteHeader(http.StatusMethodNotAllowed)
			return nil // keep listening for more requests
		}
@ -883,11 +882,11 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req
			origin := r.Header.Get("Origin")
			if origin == "" {
				// The CORS preflight request should have an origin.
				h.logger.V(debugLogLevel).Info("Pinniped: Got OPTIONS request without origin header")
				h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Got OPTIONS request without origin header")
				w.WriteHeader(http.StatusBadRequest)
				return nil // keep listening for more requests
			}
			h.logger.V(debugLogLevel).Info("Pinniped: Got CORS preflight request from browser", "origin", origin)
			h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Got CORS preflight request from browser", "origin", origin)
			// To tell the browser that it is okay to make the real POST request, return the following response.
			w.Header().Set("Access-Control-Allow-Origin", allowOrigin)
			w.Header().Set("Vary", "*") // supposed to use Vary when Access-Control-Allow-Origin is a specific host
@ -921,7 +920,7 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req
	} else {
		// Return HTTP 405 for anything that's not a GET.
		if r.Method != http.MethodGet {
			h.logger.V(debugLogLevel).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method)
			h.logger.V(plog.KlogLevelDebug).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method)
			w.WriteHeader(http.StatusMethodNotAllowed)
			return nil // keep listening for more requests
		}
@ -20,10 +20,6 @@ import (
	"testing"
	"time"

	"go.pinniped.dev/internal/net/phttp"

	"go.pinniped.dev/internal/testutil/tlsserver"

	"github.com/coreos/go-oidc/v3/oidc"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
@ -35,9 +31,12 @@ import (
	"go.pinniped.dev/internal/httputil/httperr"
	"go.pinniped.dev/internal/httputil/roundtripper"
	"go.pinniped.dev/internal/mocks/mockupstreamoidcidentityprovider"
	"go.pinniped.dev/internal/net/phttp"
	"go.pinniped.dev/internal/oidc/provider"
	"go.pinniped.dev/internal/plog"
	"go.pinniped.dev/internal/testutil"
	"go.pinniped.dev/internal/testutil/testlogger"
	"go.pinniped.dev/internal/testutil/tlsserver"
	"go.pinniped.dev/internal/upstreamoidc"
	"go.pinniped.dev/pkg/oidcclient/nonce"
	"go.pinniped.dev/pkg/oidcclient/oidctypes"
@ -1891,7 +1890,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			testLogger := testlogger.NewLegacy(t) //nolint: staticcheck // old test with lots of log statements
			testLogger := testlogger.NewLegacy(t) // nolint: staticcheck // old test with lots of log statements
			klog.SetLogger(testLogger.Logger)

			tok, err := Login(tt.issuer, tt.clientID,
@ -2334,7 +2333,7 @@ func TestHandleAuthCodeCallback(t *testing.T) {
				state:  state.State("test-state"),
				pkce:   pkce.Code("test-pkce"),
				nonce:  nonce.Nonce("test-nonce"),
				logger: testlogger.New(t).Logger,
				logger: plog.Logr(), // nolint: staticcheck // old test with no log assertions
				issuer: "https://valid-issuer.com/with/some/path",
			}
			if tt.opt != nil {
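Several of the test hunks above rely on swapping the global klog logger for a test-scoped logr.Logger. A rough sketch of that pattern using zaptest and zapr in place of the project's own helpers (plog.TestLogger and testlogger are not shown here):

package example_test

import (
	"testing"

	"github.com/go-logr/zapr"
	"go.uber.org/zap/zaptest"
	"k8s.io/klog/v2"
)

// TestCapturesKlogOutput routes klog through a per-test zap logger so that
// anything logged via klog shows up in the test's output.
func TestCapturesKlogOutput(t *testing.T) {
	klog.SetLogger(zapr.NewLogger(zaptest.NewLogger(t)))
	defer klog.ClearLogger() // klog's logger is global, so restore it afterwards

	klog.InfoS("hello from klog", "key", "value")
}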