Merge branch 'main' into authorize_endpoint
commit b21c27b219
@@ -38,6 +38,7 @@ import (
"go.pinniped.dev/internal/controller/apicerts"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
+"go.pinniped.dev/internal/plog"
)

const (
@@ -92,11 +93,11 @@ func (w *webhook) start(ctx context.Context, l net.Listener) error {
go func() {
select {
case err := <-errCh:
-klog.InfoS("server exited", "err", err)
+plog.Debug("server exited", "err", err)
case <-ctx.Done():
-klog.InfoS("server context cancelled", "err", ctx.Err())
+plog.Debug("server context cancelled", "err", ctx.Err())
if err := server.Shutdown(context.Background()); err != nil {
-klog.InfoS("server shutdown failed", "err", err)
+plog.Debug("server shutdown failed", "err", err)
}
}
}()
@@ -114,13 +115,13 @@ func (w *webhook) ServeHTTP(rsp http.ResponseWriter, req *http.Request) {
secret, err := w.secretInformer.Lister().Secrets(namespace).Get(username)
notFound := k8serrors.IsNotFound(err)
if err != nil && !notFound {
-klog.InfoS("could not get secret", "err", err)
+plog.Debug("could not get secret", "err", err)
rsp.WriteHeader(http.StatusInternalServerError)
return
}

if notFound {
-klog.InfoS("user not found")
+plog.Debug("user not found")
respondWithUnauthenticated(rsp)
return
}
@@ -130,7 +131,7 @@ func (w *webhook) ServeHTTP(rsp http.ResponseWriter, req *http.Request) {
[]byte(password),
) == nil
if !passwordMatches {
-klog.InfoS("authentication failed: wrong password")
+plog.Debug("authentication failed: wrong password")
respondWithUnauthenticated(rsp)
return
}
@@ -141,32 +142,32 @@ func (w *webhook) ServeHTTP(rsp http.ResponseWriter, req *http.Request) {
groupsCSVReader := csv.NewReader(groupsBuf)
groups, err = groupsCSVReader.Read()
if err != nil {
-klog.InfoS("could not read groups", "err", err)
+plog.Debug("could not read groups", "err", err)
rsp.WriteHeader(http.StatusInternalServerError)
return
}
trimLeadingAndTrailingWhitespace(groups)
}

-klog.InfoS("successful authentication")
+plog.Debug("successful authentication")
respondWithAuthenticated(rsp, secret.ObjectMeta.Name, string(secret.UID), groups)
}

func getUsernameAndPasswordFromRequest(rsp http.ResponseWriter, req *http.Request) (string, string, error) {
if req.URL.Path != "/authenticate" {
-klog.InfoS("received request path other than /authenticate", "path", req.URL.Path)
+plog.Debug("received request path other than /authenticate", "path", req.URL.Path)
rsp.WriteHeader(http.StatusNotFound)
return "", "", invalidRequest
}

if req.Method != http.MethodPost {
-klog.InfoS("received request method other than post", "method", req.Method)
+plog.Debug("received request method other than post", "method", req.Method)
rsp.WriteHeader(http.StatusMethodNotAllowed)
return "", "", invalidRequest
}

if !headerContains(req, "Content-Type", "application/json") {
-klog.InfoS("content type is not application/json", "Content-Type", req.Header.Values("Content-Type"))
+plog.Debug("content type is not application/json", "Content-Type", req.Header.Values("Content-Type"))
rsp.WriteHeader(http.StatusUnsupportedMediaType)
return "", "", invalidRequest
}
@@ -174,39 +175,39 @@ func getUsernameAndPasswordFromRequest(rsp http.ResponseWriter, req *http.Reques
if !headerContains(req, "Accept", "application/json") &&
!headerContains(req, "Accept", "application/*") &&
!headerContains(req, "Accept", "*/*") {
-klog.InfoS("client does not accept application/json", "Accept", req.Header.Values("Accept"))
+plog.Debug("client does not accept application/json", "Accept", req.Header.Values("Accept"))
rsp.WriteHeader(http.StatusUnsupportedMediaType)
return "", "", invalidRequest
}

if req.Body == nil {
-klog.InfoS("invalid nil body")
+plog.Debug("invalid nil body")
rsp.WriteHeader(http.StatusBadRequest)
return "", "", invalidRequest
}

var body authenticationv1beta1.TokenReview
if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
-klog.InfoS("failed to decode body", "err", err)
+plog.Debug("failed to decode body", "err", err)
rsp.WriteHeader(http.StatusBadRequest)
return "", "", invalidRequest
}

if body.APIVersion != authenticationv1beta1.SchemeGroupVersion.String() {
-klog.InfoS("invalid TokenReview apiVersion", "apiVersion", body.APIVersion)
+plog.Debug("invalid TokenReview apiVersion", "apiVersion", body.APIVersion)
rsp.WriteHeader(http.StatusBadRequest)
return "", "", invalidRequest
}

if body.Kind != "TokenReview" {
-klog.InfoS("invalid TokenReview kind", "kind", body.Kind)
+plog.Debug("invalid TokenReview kind", "kind", body.Kind)
rsp.WriteHeader(http.StatusBadRequest)
return "", "", invalidRequest
}

tokenSegments := strings.SplitN(body.Spec.Token, ":", 2)
if len(tokenSegments) != 2 {
-klog.InfoS("bad token format in request")
+plog.Debug("bad token format in request")
rsp.WriteHeader(http.StatusBadRequest)
return "", "", invalidRequest
}
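For context, the validation above only admits a POST to /authenticate carrying a JSON-encoded TokenReview whose token has the form "username:password". A minimal client sketch under those assumptions (the URL and credentials below are hypothetical placeholders, and TLS setup is omitted):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	review := authenticationv1beta1.TokenReview{
		TypeMeta: metav1.TypeMeta{
			Kind:       "TokenReview",
			APIVersion: authenticationv1beta1.SchemeGroupVersion.String(), // "authentication.k8s.io/v1beta1"
		},
		// The webhook splits the token on the first ":" into username and password.
		Spec: authenticationv1beta1.TokenReviewSpec{Token: "some-username:some-password"},
	}

	payload, err := json.Marshal(review)
	if err != nil {
		panic(err)
	}

	// Hypothetical address for the local-user-authenticator webhook.
	req, err := http.NewRequest(http.MethodPost, "https://local-user-authenticator.example.com/authenticate", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 with a TokenReview status body
}
```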
@@ -247,7 +248,7 @@ func respondWithUnauthenticated(rsp http.ResponseWriter) {
},
}
if err := json.NewEncoder(rsp).Encode(body); err != nil {
-klog.InfoS("could not encode response", "err", err)
+plog.Debug("could not encode response", "err", err)
rsp.WriteHeader(http.StatusInternalServerError)
}
}
@@ -273,7 +274,7 @@ func respondWithAuthenticated(
},
}
if err := json.NewEncoder(rsp).Encode(body); err != nil {
-klog.InfoS("could not encode response", "err", err)
+plog.Debug("could not encode response", "err", err)
rsp.WriteHeader(http.StatusInternalServerError)
}
}
@@ -372,7 +373,7 @@ func run() error {
dynamicCertProvider := dynamiccert.New()

startControllers(ctx, dynamicCertProvider, kubeClient, kubeInformers)
-klog.InfoS("controllers are ready")
+plog.Debug("controllers are ready")

//nolint: gosec // Intentionally binding to all network interfaces.
l, err := net.Listen("tcp", ":8443")
@@ -385,10 +386,10 @@ func run() error {
if err != nil {
return fmt.Errorf("cannot start webhook: %w", err)
}
-klog.InfoS("webhook is ready", "address", l.Addr().String())
+plog.Debug("webhook is ready", "address", l.Addr().String())

gotSignal := waitForSignal()
-klog.InfoS("webhook exiting", "signal", gotSignal)
+plog.Debug("webhook exiting", "signal", gotSignal)

return nil
}
@@ -32,6 +32,7 @@ import (
"go.pinniped.dev/internal/oidc/jwks"
"go.pinniped.dev/internal/oidc/provider"
"go.pinniped.dev/internal/oidc/provider/manager"
+"go.pinniped.dev/internal/plog"
)

const (
@@ -50,11 +51,11 @@ func start(ctx context.Context, l net.Listener, handler http.Handler) {
go func() {
select {
case err := <-errCh:
-klog.InfoS("server exited", "err", err)
+plog.Debug("server exited", "err", err)
case <-ctx.Done():
-klog.InfoS("server context cancelled", "err", ctx.Err())
+plog.Debug("server context cancelled", "err", ctx.Err())
if err := server.Shutdown(context.Background()); err != nil {
-klog.InfoS("server shutdown failed", "err", err)
+plog.Debug("server shutdown failed", "err", err)
}
}
}()
@@ -212,7 +213,7 @@ func run(serverInstallationNamespace string, cfg *supervisor.Config) error {
GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
cert := dynamicTLSCertProvider.GetTLSCert(strings.ToLower(info.ServerName))
defaultCert := dynamicTLSCertProvider.GetDefaultTLSCert()
-klog.InfoS("GetCertificate called for port 8443",
+plog.Debug("GetCertificate called for port 8443",
"info.ServerName", info.ServerName,
"foundSNICert", cert != nil,
"foundDefaultCert", defaultCert != nil,
@@ -229,13 +230,13 @@ func run(serverInstallationNamespace string, cfg *supervisor.Config) error {
defer func() { _ = httpsListener.Close() }()
start(ctx, httpsListener, oidProvidersManager)

-klog.InfoS("supervisor is ready",
+plog.Debug("supervisor is ready",
"httpAddress", httpListener.Addr().String(),
"httpsAddress", httpsListener.Addr().String(),
)

gotSignal := waitForSignal()
-klog.InfoS("supervisor exiting", "signal", gotSignal)
+plog.Debug("supervisor exiting", "signal", gotSignal)

return nil
}
@@ -243,6 +244,7 @@ func run(serverInstallationNamespace string, cfg *supervisor.Config) error {
func main() {
logs.InitLogs()
defer logs.FlushLogs()
+plog.RemoveKlogGlobalFlags() // move this whenever the below code gets refactored to use cobra

klog.Infof("Running %s at %#v", rest.DefaultKubernetesUserAgent(), version.Get())
klog.Infof("Command-line arguments were: %s %s %s", os.Args[0], os.Args[1], os.Args[2])
@@ -20,6 +20,7 @@ import (
"go.pinniped.dev/internal/client"
"go.pinniped.dev/internal/constable"
"go.pinniped.dev/internal/here"
+"go.pinniped.dev/internal/plog"
)

//nolint: gochecknoinits
@@ -80,6 +81,8 @@ func newExchangeCredentialCmd(args []string, stdout, stderr io.Writer) *exchange
c.cmd.SetOut(stdout)
c.cmd.SetErr(stderr)

+plog.RemoveKlogGlobalFlags()
+
return c
}

@@ -120,7 +120,7 @@ func TestNewCredentialExchangeCmd(t *testing.T) {
r.Equal(knownGoodHelpForExchangeCredential, stdout.String())
r.Empty(stderr.String())
})
-}, spec.Parallel(), spec.Report(report.Terminal{}))
+}, spec.Sequential(), spec.Report(report.Terminal{}))
}

func TestExchangeCredential(t *testing.T) {
@@ -25,6 +25,7 @@ import (
pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
"go.pinniped.dev/internal/constable"
"go.pinniped.dev/internal/here"
+"go.pinniped.dev/internal/plog"
)

//nolint: gochecknoinits
@@ -91,6 +92,7 @@ func (c *getKubeConfigCommand) Command() *cobra.Command {
cmd.Flags().StringVar(&c.flags.authenticatorType, "authenticator-type", c.flags.authenticatorType, "Authenticator type (e.g., 'webhook')")
cmd.Flags().StringVar(&c.flags.authenticatorName, "authenticator-name", c.flags.authenticatorType, "Authenticator name")
mustMarkRequired(cmd, "token")
+plog.RemoveKlogGlobalFlags()
return cmd
}

@@ -72,7 +72,6 @@ var (
)

func TestNewGetKubeConfigCmd(t *testing.T) {
-t.Parallel()
tests := []struct {
name string
args []string
@@ -95,7 +94,6 @@
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
-t.Parallel()
cmd := newGetKubeConfigCommand().Command()
require.NotNil(t, cmd)

@@ -16,8 +16,6 @@ import (
)

func TestLoginOIDCCommand(t *testing.T) {
-t.Parallel()
-
cfgDir := mustGetConfigDir()

time1 := time.Date(3020, 10, 12, 13, 14, 15, 16, time.UTC)
@@ -88,7 +86,6 @@
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
-t.Parallel()
var (
gotIssuer string
gotClientID string

@@ -36,7 +36,6 @@ var (
)

func TestNewVersionCmd(t *testing.T) {
-t.Parallel()
tests := []struct {
name string
args []string
@@ -64,7 +63,6 @@
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
-t.Parallel()
cmd := newVersionCommand()
require.NotNil(t, cmd)

@@ -100,6 +100,7 @@ check_dependency ytt "Please install ytt. e.g. 'brew tap k14s/tap && brew instal
check_dependency kapp "Please install kapp. e.g. 'brew tap k14s/tap && brew install kapp' for MacOS"
check_dependency kubectl "Please install kubectl. e.g. 'brew install kubectl' for MacOS"
check_dependency htpasswd "Please install htpasswd. Should be pre-installed on MacOS. Usually found in 'apache2-utils' package for linux."
+check_dependency chromedriver "Please install chromedriver. e.g. 'brew install chromedriver' for MacOS"

# Require kubectl >= 1.18.x
if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
@@ -15,10 +15,10 @@ import (
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/pkg/version"
-"k8s.io/klog/v2"

loginapi "go.pinniped.dev/generated/1.19/apis/concierge/login"
loginv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/login/v1alpha1"
+"go.pinniped.dev/internal/plog"
"go.pinniped.dev/internal/registry/credentialrequest"
)

@@ -111,7 +111,7 @@ func (c completedConfig) New() (*PinnipedServer, error) {

s.GenericAPIServer.AddPostStartHookOrDie("start-controllers",
func(postStartContext genericapiserver.PostStartHookContext) error {
-klog.InfoS("start-controllers post start hook starting")
+plog.Debug("start-controllers post start hook starting")

ctx, cancel := context.WithCancel(context.Background())
go func() {

@@ -23,6 +23,7 @@ import (
"go.pinniped.dev/internal/downward"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/here"
+"go.pinniped.dev/internal/plog"
"go.pinniped.dev/internal/registry/credentialrequest"
)

@@ -87,6 +88,8 @@ func addCommandlineFlagsToCommand(cmd *cobra.Command, app *App) {
"/etc/podinfo",
"path to Downward API volume mount",
)
+
+plog.RemoveKlogGlobalFlags()
}

// Boot the aggregated API server, which will in turn boot the controllers.

@@ -23,10 +23,9 @@
pinniped-concierge [flags]

Flags:
- -c, --config string path to configuration file (default "pinniped.yaml")
- --downward-api-path string path to Downward API volume mount (default "/etc/podinfo")
- -h, --help help for pinniped-concierge
- --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
+ -c, --config string path to configuration file (default "pinniped.yaml")
+ --downward-api-path string path to Downward API volume mount (default "/etc/podinfo")
+ -h, --help help for pinniped-concierge
`

func TestCommand(t *testing.T) {
@@ -13,6 +13,7 @@ import (
"sigs.k8s.io/yaml"

"go.pinniped.dev/internal/constable"
+"go.pinniped.dev/internal/plog"
)

const (
@@ -49,6 +50,10 @@ func FromPath(path string) (*Config, error) {
return nil, fmt.Errorf("validate names: %w", err)
}

+if err := plog.ValidateAndSetLogLevelGlobally(config.LogLevel); err != nil {
+return nil, fmt.Errorf("validate log level: %w", err)
+}
+
if config.Labels == nil {
config.Labels = make(map[string]string)
}
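The new logLevel knob is read from the same YAML config file as the existing fields. A minimal sketch of how a value like "debug" flows through this path (the struct mirrors only the relevant field, and the file contents are hypothetical):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"

	"go.pinniped.dev/internal/plog"
)

// minimalConfig mirrors just the logLevel field of the concierge/supervisor Config types.
type minimalConfig struct {
	LogLevel plog.LogLevel `json:"logLevel"`
}

func main() {
	// Hypothetical excerpt of a pinniped.yaml-style config file.
	raw := []byte("logLevel: debug\n")

	var cfg minimalConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// Same call that FromPath makes above; "debug" maps to klog verbosity 4.
	if err := plog.ValidateAndSetLogLevelGlobally(cfg.LogLevel); err != nil {
		panic(err)
	}

	fmt.Println("log level set to", cfg.LogLevel)
}
```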
@@ -3,6 +3,8 @@

package concierge

+import "go.pinniped.dev/internal/plog"
+
// Config contains knobs to setup an instance of the Pinniped Concierge.
type Config struct {
DiscoveryInfo DiscoveryInfoSpec `json:"discovery"`
@@ -10,6 +12,7 @@ type Config struct {
NamesConfig NamesConfigSpec `json:"names"`
KubeCertAgentConfig KubeCertAgentSpec `json:"kubeCertAgent"`
Labels map[string]string `json:"labels"`
+LogLevel plog.LogLevel `json:"logLevel"`
}

// DiscoveryInfoSpec contains configuration knobs specific to

@@ -13,6 +13,7 @@ import (
"sigs.k8s.io/yaml"

"go.pinniped.dev/internal/constable"
+"go.pinniped.dev/internal/plog"
)

// FromPath loads an Config from a provided local file path, inserts any
@@ -37,6 +38,10 @@ func FromPath(path string) (*Config, error) {
return nil, fmt.Errorf("validate names: %w", err)
}

+if err := plog.ValidateAndSetLogLevelGlobally(config.LogLevel); err != nil {
+return nil, fmt.Errorf("validate log level: %w", err)
+}
+
return &config, nil
}

@@ -3,10 +3,13 @@

package supervisor

+import "go.pinniped.dev/internal/plog"
+
// Config contains knobs to setup an instance of the Pinniped Supervisor.
type Config struct {
Labels map[string]string `json:"labels"`
NamesConfig NamesConfigSpec `json:"names"`
+LogLevel plog.LogLevel `json:"logLevel"`
}

// NamesConfigSpec configures the names of some Kubernetes resources for the Supervisor.

@@ -31,7 +31,7 @@ func New(cache *authncache.Cache, webhooks authinformers.WebhookAuthenticatorInf
},
controllerlib.WithInformer(
webhooks,
-pinnipedcontroller.MatchAnythingFilter(),
+pinnipedcontroller.MatchAnythingFilter(pinnipedcontroller.SingletonQueue()),
controllerlib.InformerOption{},
),
)
@@ -40,7 +40,7 @@ func New(cache *authncache.Cache, webhooks authinformers.WebhookAuthenticatorInf
},
controllerlib.WithInformer(
webhooks,
-pinnipedcontroller.MatchAnythingFilter(),
+pinnipedcontroller.MatchAnythingFilter(nil), // nil parent func is fine because each event is distinct
controllerlib.InformerOption{},
),
)

@@ -16,6 +16,7 @@ import (
pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
)

const (
@@ -75,7 +76,7 @@ func (c *kubeConigInfoPublisherController) Sync(ctx controllerlib.Context) error
return fmt.Errorf("failed to get %s configmap: %w", clusterInfoName, err)
}
if notFound {
-klog.InfoS(
+plog.Debug(
"could not find config map",
"configmap",
klog.KRef(ClusterInfoNamespace, clusterInfoName),
@@ -85,13 +86,13 @@ func (c *kubeConigInfoPublisherController) Sync(ctx controllerlib.Context) error

kubeConfig, kubeConfigPresent := configMap.Data[clusterInfoConfigMapKey]
if !kubeConfigPresent {
-klog.InfoS("could not find kubeconfig configmap key")
+plog.Debug("could not find kubeconfig configmap key")
return nil
}

config, err := clientcmd.Load([]byte(kubeConfig))
if err != nil {
-klog.InfoS("could not load kubeconfig configmap key")
+plog.Debug("could not load kubeconfig configmap key")
return nil
}

@@ -19,6 +19,7 @@ import (
pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
)

// These constants are the default values for the kube-controller-manager flags. If the flags are
@@ -71,12 +72,12 @@ func NewAnnotaterController(
},
withInformer(
kubeSystemPodInformer,
-pinnipedcontroller.SimpleFilter(isControllerManagerPod),
+pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
controllerlib.InformerOption{},
),
withInformer(
agentPodInformer,
-pinnipedcontroller.SimpleFilter(isAgentPod),
+pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
controllerlib.InformerOption{},
),
)
@@ -177,7 +178,7 @@ func (c *annotaterController) reallyUpdateAgentPod(
updatedAgentPod.Annotations[agentPodCertPathAnnotationKey] = certPath
updatedAgentPod.Annotations[agentPodKeyPathAnnotationKey] = keyPath

-klog.InfoS(
+plog.Debug(
"updating agent pod annotations",
"pod",
klog.KObj(updatedAgentPod),

@@ -18,6 +18,7 @@ import (
"go.pinniped.dev/internal/constable"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
)

type createrController struct {
@@ -65,12 +66,12 @@ func NewCreaterController(
},
withInformer(
kubeSystemPodInformer,
-pinnipedcontroller.SimpleFilter(isControllerManagerPod),
+pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
controllerlib.InformerOption{},
),
withInformer(
agentPodInformer,
-pinnipedcontroller.SimpleFilter(isAgentPod),
+pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
controllerlib.InformerOption{},
),
// Be sure to run once even to make sure the CI is updated if there are no controller manager
@@ -118,7 +119,7 @@ func (c *createrController) Sync(ctx controllerlib.Context) error {
if agentPod == nil {
agentPod = c.agentPodConfig.newAgentPod(controllerManagerPod)

-klog.InfoS(
+plog.Debug(
"creating agent pod",
"pod",
klog.KObj(agentPod),

@@ -13,6 +13,7 @@ import (

pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
)

type deleterController struct {
@@ -43,12 +44,12 @@ func NewDeleterController(
},
withInformer(
kubeSystemPodInformer,
-pinnipedcontroller.SimpleFilter(isControllerManagerPod),
+pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
controllerlib.InformerOption{},
),
withInformer(
agentPodInformer,
-pinnipedcontroller.SimpleFilter(isAgentPod),
+pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
controllerlib.InformerOption{},
),
)
@@ -71,7 +72,7 @@ func (c *deleterController) Sync(ctx controllerlib.Context) error {
}
if controllerManagerPod == nil ||
!isAgentPodUpToDate(agentPod, c.agentPodConfig.newAgentPod(controllerManagerPod)) {
-klog.InfoS("deleting agent pod", "pod", klog.KObj(agentPod))
+plog.Debug("deleting agent pod", "pod", klog.KObj(agentPod))
err := c.k8sClient.
CoreV1().
Pods(agentPod.Namespace).

@@ -56,7 +56,7 @@ func NewExecerController(
},
withInformer(
agentPodInformer,
-pinnipedcontroller.SimpleFilter(isAgentPod),
+pinnipedcontroller.SimpleFilter(isAgentPod, nil), // nil parent func is fine because each event is distinct
controllerlib.InformerOption{},
),
)

@@ -23,11 +23,11 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
corev1informers "k8s.io/client-go/informers/core/v1"
-"k8s.io/klog/v2"

configv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/config/v1alpha1"
pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
"go.pinniped.dev/internal/controller/issuerconfig"
+"go.pinniped.dev/internal/plog"
)

const (
@@ -257,13 +257,13 @@ func findControllerManagerPodForSpecificAgentPod(
) (*corev1.Pod, error) {
name, ok := agentPod.Annotations[controllerManagerNameAnnotationKey]
if !ok {
-klog.InfoS("agent pod missing parent name annotation", "pod", agentPod.Name)
+plog.Debug("agent pod missing parent name annotation", "pod", agentPod.Name)
return nil, nil
}

uid, ok := agentPod.Annotations[controllerManagerUIDAnnotationKey]
if !ok {
-klog.InfoS("agent pod missing parent uid annotation", "pod", agentPod.Name)
+plog.Debug("agent pod missing parent uid annotation", "pod", agentPod.Name)
return nil, nil
}

@@ -10,11 +10,11 @@ import (
"gopkg.in/square/go-jose.v2"
"k8s.io/apimachinery/pkg/labels"
corev1informers "k8s.io/client-go/informers/core/v1"
-"k8s.io/klog/v2"

"go.pinniped.dev/generated/1.19/client/supervisor/informers/externalversions/config/v1alpha1"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
)

type jwksObserverController struct {
@@ -49,12 +49,12 @@ func NewJWKSObserverController(
},
withInformer(
secretInformer,
-pinnipedcontroller.MatchAnythingFilter(),
+pinnipedcontroller.MatchAnythingFilter(nil),
controllerlib.InformerOption{},
),
withInformer(
oidcProviderInformer,
-pinnipedcontroller.MatchAnythingFilter(),
+pinnipedcontroller.MatchAnythingFilter(nil),
controllerlib.InformerOption{},
),
)
@@ -75,19 +75,19 @@ func (c *jwksObserverController) Sync(ctx controllerlib.Context) error {
secretRef := provider.Status.JWKSSecret
jwksSecret, err := c.secretInformer.Lister().Secrets(ns).Get(secretRef.Name)
if err != nil {
-klog.InfoS("jwksObserverController Sync could not find JWKS secret", "namespace", ns, "secretName", secretRef.Name)
+plog.Debug("jwksObserverController Sync could not find JWKS secret", "namespace", ns, "secretName", secretRef.Name)
continue
}
jwkFromSecret := jose.JSONWebKeySet{}
err = json.Unmarshal(jwksSecret.Data[jwksKey], &jwkFromSecret)
if err != nil {
-klog.InfoS("jwksObserverController Sync found a JWKS secret with Data in an unexpected format", "namespace", ns, "secretName", secretRef.Name)
+plog.Debug("jwksObserverController Sync found a JWKS secret with Data in an unexpected format", "namespace", ns, "secretName", secretRef.Name)
continue
}
issuerToJWKSMap[provider.Spec.Issuer] = &jwkFromSecret
}

-klog.InfoS("jwksObserverController Sync updated the JWKS cache", "issuerCount", len(issuerToJWKSMap))
+plog.Debug("jwksObserverController Sync updated the JWKS cache", "issuerCount", len(issuerToJWKSMap))
c.issuerToJWKSSetter.SetIssuerToJWKSMap(issuerToJWKSMap)

return nil

@@ -27,6 +27,7 @@ import (
configinformers "go.pinniped.dev/generated/1.19/client/supervisor/informers/externalversions/config/v1alpha1"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
)

// These constants are the keys in an OPC's Secret's Data map.
@@ -110,7 +111,7 @@ func NewJWKSWriterController(
// We want to be notified when anything happens to an OPC.
withInformer(
opcInformer,
-pinnipedcontroller.MatchAnythingFilter(),
+pinnipedcontroller.MatchAnythingFilter(nil), // nil parent func is fine because each event is distinct
controllerlib.InformerOption{},
),
)
@@ -132,7 +133,7 @@ func (c *jwksWriterController) Sync(ctx controllerlib.Context) error {
if notFound {
// The corresponding secret to this OPC should have been garbage collected since it should have
// had this OPC as its owner.
-klog.InfoS(
+plog.Debug(
"oidcprovider deleted",
"oidcprovider",
klog.KRef(ctx.Key.Namespace, ctx.Key.Name),
@@ -146,7 +147,7 @@ func (c *jwksWriterController) Sync(ctx controllerlib.Context) error {
}
if !secretNeedsUpdate {
// Secret is up to date - we are good to go.
-klog.InfoS(
+plog.Debug(
"secret is up to date",
"oidcprovider",
klog.KRef(ctx.Key.Namespace, ctx.Key.Name),
@@ -164,7 +165,7 @@ func (c *jwksWriterController) Sync(ctx controllerlib.Context) error {
if err := c.createOrUpdateSecret(ctx.Context, secret); err != nil {
return fmt.Errorf("cannot create or update secret: %w", err)
}
-klog.InfoS("created/updated secret", "secret", klog.KObj(secret))
+plog.Debug("created/updated secret", "secret", klog.KObj(secret))

// Ensure that the OPC points to the secret.
newOPC := opc.DeepCopy()
@@ -172,7 +173,7 @@ func (c *jwksWriterController) Sync(ctx controllerlib.Context) error {
if err := c.updateOPC(ctx.Context, newOPC); err != nil {
return fmt.Errorf("cannot update opc: %w", err)
}
-klog.InfoS("updated oidcprovider", "oidcprovider", klog.KObj(newOPC))
+plog.Debug("updated oidcprovider", "oidcprovider", klog.KObj(newOPC))

return nil
}
@@ -323,45 +324,45 @@ func isOPCControllee(obj metav1.Object) bool {
func isValid(secret *corev1.Secret) bool {
jwkData, ok := secret.Data[activeJWKKey]
if !ok {
-klog.InfoS("secret does not contain active jwk")
+plog.Debug("secret does not contain active jwk")
return false
}

var activeJWK jose.JSONWebKey
if err := json.Unmarshal(jwkData, &activeJWK); err != nil {
-klog.InfoS("cannot unmarshal active jwk", "err", err)
+plog.Debug("cannot unmarshal active jwk", "err", err)
return false
}

if activeJWK.IsPublic() {
-klog.InfoS("active jwk is public", "keyid", activeJWK.KeyID)
+plog.Debug("active jwk is public", "keyid", activeJWK.KeyID)
return false
}

if !activeJWK.Valid() {
-klog.InfoS("active jwk is not valid", "keyid", activeJWK.KeyID)
+plog.Debug("active jwk is not valid", "keyid", activeJWK.KeyID)
return false
}

jwksData, ok := secret.Data[jwksKey]
if !ok {
-klog.InfoS("secret does not contain valid jwks")
+plog.Debug("secret does not contain valid jwks")
}

var validJWKS jose.JSONWebKeySet
if err := json.Unmarshal(jwksData, &validJWKS); err != nil {
-klog.InfoS("cannot unmarshal valid jwks", "err", err)
+plog.Debug("cannot unmarshal valid jwks", "err", err)
return false
}

foundActiveJWK := false
for _, validJWK := range validJWKS.Keys {
if !validJWK.IsPublic() {
-klog.InfoS("jwks key is not public", "keyid", validJWK.KeyID)
+plog.Debug("jwks key is not public", "keyid", validJWK.KeyID)
return false
}
if !validJWK.Valid() {
-klog.InfoS("jwks key is not valid", "keyid", validJWK.KeyID)
+plog.Debug("jwks key is not valid", "keyid", validJWK.KeyID)
return false
}
if validJWK.KeyID == activeJWK.KeyID {
@@ -370,7 +371,7 @@ func isValid(secret *corev1.Secret) bool {
}

if !foundActiveJWK {
-klog.InfoS("did not find active jwk in valid jwks", "keyid", activeJWK.KeyID)
+plog.Debug("did not find active jwk in valid jwks", "keyid", activeJWK.KeyID)
return false
}

@@ -9,8 +9,6 @@ import (
"net/url"
"strings"

-"go.pinniped.dev/internal/multierror"
-
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
@@ -22,7 +20,9 @@ import (
configinformers "go.pinniped.dev/generated/1.19/client/supervisor/informers/externalversions/config/v1alpha1"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/multierror"
"go.pinniped.dev/internal/oidc/provider"
+"go.pinniped.dev/internal/plog"
)

// ProvidersSetter can be notified of all known valid providers with its SetIssuer function.
@@ -60,7 +60,7 @@ func NewOIDCProviderWatcherController(
},
withInformer(
opcInformer,
-pinnipedcontroller.MatchAnythingFilter(),
+pinnipedcontroller.MatchAnythingFilter(pinnipedcontroller.SingletonQueue()),
controllerlib.InformerOption{},
),
)
@@ -191,7 +191,7 @@ func (c *oidcProviderWatcherController) updateStatus(
return nil
}

-klog.InfoS(
+plog.Debug(
"attempting status update",
"openidproviderconfig",
klog.KRef(namespace, name),

@@ -11,11 +11,11 @@ import (

"k8s.io/apimachinery/pkg/labels"
corev1informers "k8s.io/client-go/informers/core/v1"
-"k8s.io/klog/v2"

"go.pinniped.dev/generated/1.19/client/supervisor/informers/externalversions/config/v1alpha1"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controllerlib"
+"go.pinniped.dev/internal/plog"
)

type tlsCertObserverController struct {
@@ -49,12 +49,12 @@ func NewTLSCertObserverController(
},
withInformer(
secretInformer,
-pinnipedcontroller.MatchAnythingFilter(),
+pinnipedcontroller.MatchAnythingFilter(nil),
controllerlib.InformerOption{},
),
withInformer(
oidcProviderInformer,
-pinnipedcontroller.MatchAnythingFilter(),
+pinnipedcontroller.MatchAnythingFilter(nil),
controllerlib.InformerOption{},
),
)
@@ -78,7 +78,7 @@ func (c *tlsCertObserverController) Sync(ctx controllerlib.Context) error {
}
issuerURL, err := url.Parse(provider.Spec.Issuer)
if err != nil {
-klog.InfoS("tlsCertObserverController Sync found an invalid issuer URL", "namespace", ns, "issuer", provider.Spec.Issuer)
+plog.Debug("tlsCertObserverController Sync found an invalid issuer URL", "namespace", ns, "issuer", provider.Spec.Issuer)
continue
}
certFromSecret, err := c.certFromSecret(ns, secretName)
@@ -89,7 +89,7 @@ func (c *tlsCertObserverController) Sync(ctx controllerlib.Context) error {
issuerHostToTLSCertMap[lowercaseHostWithoutPort(issuerURL)] = certFromSecret
}

-klog.InfoS("tlsCertObserverController Sync updated the TLS cert cache", "issuerHostCount", len(issuerHostToTLSCertMap))
+plog.Debug("tlsCertObserverController Sync updated the TLS cert cache", "issuerHostCount", len(issuerHostToTLSCertMap))
c.issuerTLSCertSetter.SetIssuerHostToTLSCertMap(issuerHostToTLSCertMap)

defaultCert, err := c.certFromSecret(ns, c.defaultTLSCertificateSecretName)
@@ -105,12 +105,12 @@ func (c *tlsCertObserverController) Sync(ctx controllerlib.Context) error {
func (c *tlsCertObserverController) certFromSecret(ns string, secretName string) (*tls.Certificate, error) {
tlsSecret, err := c.secretInformer.Lister().Secrets(ns).Get(secretName)
if err != nil {
-klog.InfoS("tlsCertObserverController Sync could not find TLS cert secret", "namespace", ns, "secretName", secretName)
+plog.Debug("tlsCertObserverController Sync could not find TLS cert secret", "namespace", ns, "secretName", secretName)
return nil, err
}
certFromSecret, err := tls.X509KeyPair(tlsSecret.Data["tls.crt"], tlsSecret.Data["tls.key"])
if err != nil {
-klog.InfoS("tlsCertObserverController Sync found a TLS secret with Data in an unexpected format", "namespace", ns, "secretName", secretName)
+plog.Debug("tlsCertObserverController Sync found a TLS secret with Data in an unexpected format", "namespace", ns, "secretName", secretName)
return nil, err
}
return &certFromSecret, nil
@@ -12,23 +12,38 @@ import (
func NameAndNamespaceExactMatchFilterFactory(name, namespace string) controllerlib.Filter {
return SimpleFilter(func(obj metav1.Object) bool {
return obj.GetName() == name && obj.GetNamespace() == namespace
-})
+// nil parent func is fine because we only match a key with the given name and namespace
+// i.e. it is equivalent to having a SingletonQueue() parent func
+}, nil)
}

// MatchAnythingFilter returns a controllerlib.Filter that allows all objects.
-func MatchAnythingFilter() controllerlib.Filter {
-return SimpleFilter(func(object metav1.Object) bool { return true })
+func MatchAnythingFilter(parentFunc controllerlib.ParentFunc) controllerlib.Filter {
+return SimpleFilter(func(object metav1.Object) bool { return true }, parentFunc)
}

// SimpleFilter takes a single boolean match function on a metav1.Object and wraps it into a proper controllerlib.Filter.
-func SimpleFilter(match func(metav1.Object) bool) controllerlib.Filter {
+func SimpleFilter(match func(metav1.Object) bool, parentFunc controllerlib.ParentFunc) controllerlib.Filter {
return controllerlib.FilterFuncs{
AddFunc: match,
UpdateFunc: func(oldObj, newObj metav1.Object) bool { return match(oldObj) || match(newObj) },
DeleteFunc: match,
+ParentFunc: parentFunc,
}
}

+// SingletonQueue returns a parent func that treats all events as the same key.
+func SingletonQueue() controllerlib.ParentFunc {
+return func(_ metav1.Object) controllerlib.Key {
+return controllerlib.Key{}
+}
+}
+
+// SimpleFilterWithSingletonQueue returns a Filter based on the given match function that treats all events as the same key.
+func SimpleFilterWithSingletonQueue(match func(metav1.Object) bool) controllerlib.Filter {
+return SimpleFilter(match, SingletonQueue())
+}
+
// Same signature as controllerlib.WithInformer().
type WithInformerOptionFunc func(
getter controllerlib.InformerGetter,
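As I read the helpers above, the new parent-func parameter controls how events are keyed for the controller's workqueue: a nil parent func keeps each matching object under its own key, while SingletonQueue() collapses every matching event onto one empty key so a burst of events triggers a single Sync. A small sketch using only the helpers from this diff (the match function is a placeholder):

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	pinnipedcontroller "go.pinniped.dev/internal/controller"
	"go.pinniped.dev/internal/controllerlib"
)

// isInterestingPod is a hypothetical match function for illustration.
func isInterestingPod(obj metav1.Object) bool {
	return obj.GetLabels()["app"] == "example"
}

// exampleFilters builds the three filter variants seen in the controllers above.
func exampleFilters() []controllerlib.Filter {
	// Each matching object keeps its own queue key; distinct events stay distinct.
	perObject := pinnipedcontroller.SimpleFilter(isInterestingPod, nil)

	// All matching events collapse onto a single key, so the controller syncs once per burst.
	collapsed := pinnipedcontroller.SimpleFilterWithSingletonQueue(isInterestingPod)

	// Match everything, but still collapse onto one key.
	anything := pinnipedcontroller.MatchAnythingFilter(pinnipedcontroller.SingletonQueue())

	return []controllerlib.Filter{perObject, collapsed, anything}
}
```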
@@ -16,6 +16,8 @@ import (
"k8s.io/client-go/tools/events"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
+
+"go.pinniped.dev/internal/plog"
)

// Controller interface represents a runnable Kubernetes controller.
@@ -87,7 +89,7 @@ type controller struct {
func (c *controller) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash(crash) // prevent panics from killing the process

-klog.InfoS("starting controller", "controller", c.Name(), "workers", workers)
+plog.Debug("starting controller", "controller", c.Name(), "workers", workers)

c.invokeAllRunOpts()

@@ -108,17 +110,17 @@ func (c *controller) Run(ctx context.Context, workers int) {
// at this point the Run() can hang and callers have to implement the logic that will kill
// this controller (SIGKILL).
workerWg.Wait()
-klog.InfoS("all workers have been terminated, shutting down", "controller", c.Name(), "workers", workers)
+plog.Debug("all workers have been terminated, shutting down", "controller", c.Name(), "workers", workers)
}()

for i := 1; i <= workers; i++ {
idx := i
-klog.InfoS("starting worker", "controller", c.Name(), "worker", idx)
+plog.Debug("starting worker", "controller", c.Name(), "worker", idx)
workerWg.Add(1)
go func() {
defer utilruntime.HandleCrash(crash) // prevent panics from killing the process
defer func() {
-klog.InfoS("shutting down worker", "controller", c.Name(), "worker", idx)
+plog.Debug("shutting down worker", "controller", c.Name(), "worker", idx)
workerWg.Done()
}()
c.runWorker(workerContext)

@@ -7,7 +7,7 @@ import (
"context"
"sync"

-"k8s.io/klog/v2"
+"go.pinniped.dev/internal/plog"
)

type Manager interface {
@@ -48,7 +48,7 @@ func (c *controllerManager) Start(ctx context.Context) {
idx := i
go func() {
r := c.controllers[idx]
-defer klog.InfoS("controller terminated", "controller", r.controller.Name())
+defer plog.Debug("controller terminated", "controller", r.controller.Name())
defer wg.Done()
r.controller.Run(ctx, r.workers)
}()

@@ -10,7 +10,6 @@ import (

"github.com/ory/fosite"
"github.com/ory/fosite/storage"
-"k8s.io/klog/v2"

"go.pinniped.dev/internal/oidc"
"go.pinniped.dev/internal/oidc/auth"
@@ -20,6 +19,7 @@ import (
"go.pinniped.dev/internal/oidcclient/nonce"
"go.pinniped.dev/internal/oidcclient/pkce"
"go.pinniped.dev/internal/oidcclient/state"
+"go.pinniped.dev/internal/plog"
)

// Manager can manage multiple active OIDC providers. It acts as a request router for them.
@@ -81,7 +81,7 @@ func (m *Manager) SetProviders(oidcProviders ...*provider.OIDCProvider) {
authURL := strings.ToLower(incomingProvider.IssuerHost()) + "/" + incomingProvider.IssuerPath() + oidc.AuthorizationEndpointPath
m.providerHandlers[authURL] = auth.NewHandler(incomingProvider.Issuer(), m.idpListGetter, oauthHelper, state.Generate, pkce.Generate, nonce.Generate)

-klog.InfoS("oidc provider manager added or updated issuer", "issuer", incomingProvider.Issuer())
+plog.Debug("oidc provider manager added or updated issuer", "issuer", incomingProvider.Issuer())
}
}

@@ -89,7 +89,7 @@ func (m *Manager) SetProviders(oidcProviders ...*provider.OIDCProvider) {
func (m *Manager) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
requestHandler := m.findHandler(req)

-klog.InfoS(
+plog.Debug(
"oidc provider manager examining request",
"method", req.Method,
"host", req.Host,

internal/plog/doc.go (new file, 22 lines)
@@ -0,0 +1,22 @@
+// Copyright 2020 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package plog implements a thin layer over klog to help enforce pinniped's logging convention.
+// Logs are always structured as a constant message with key and value pairs of related metadata.
+// The logging levels in order of increasing verbosity are:
+// error, warning, info, debug, trace and all.
+// error and warning logs are always emitted (there is no way for the end user to disable them),
+// and thus should be used sparingly. Ideally, logs at these levels should be actionable.
+// info should be reserved for "nice to know" information. It should be possible to run a production
+// pinniped server at the info log level with no performance degradation due to high log volume.
+// debug should be used for information targeted at developers and to aid in support cases. Care must
+// be taken at this level to not leak any secrets into the log stream. That is, even though debug may
+// cause performance issues in production, it must not cause security issues in production.
+// trace should be used to log information related to timing (i.e. the time it took a controller to sync).
+// Just like debug, trace should not leak secrets into the log stream. trace will likely leak information
+// about the current state of the process, but that, along with performance degradation, is expected.
+// all is reserved for the most verbose and security sensitive information. At this level, full request
+// metadata such as headers and parameters along with the body may be logged. This level is completely
+// unfit for production use both from a performance and security standpoint. Using it is generally an
+// act of desperation to determine why the system is broken.
+package plog
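To make the convention above concrete, here is a small usage sketch (the controller name, keys, and values are hypothetical) showing how a caller picks a level with the functions defined in plog.go later in this diff:

```go
package example

import (
	"errors"

	"go.pinniped.dev/internal/plog"
)

// syncExample shows one hypothetical call site per level described in the package doc.
func syncExample() {
	// Always emitted; should be rare and actionable.
	plog.Error(errors.New("connection refused"), "failed to reach API server", "attempts", 3)
	plog.Warning("deprecated field used", "field", "spec.oldThing")

	// "Nice to know" information, safe at production log volume.
	plog.Info("certificate rotated", "secret", "example-tls-secret")

	// Developer/support detail; must not contain secrets.
	plog.Debug("syncing controller", "controller", "example-controller", "key", "example-ns/example-name")

	// Timing-related detail, and the full firehose.
	plog.Trace("sync finished", "durationMillis", 42)
	plog.All("raw request", "headers", map[string][]string{"Accept": {"application/json"}})
}
```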
internal/plog/klog.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2020 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package plog
+
+import (
+"sync"
+
+"github.com/spf13/pflag"
+)
+
+//nolint: gochecknoglobals
+var removeKlogGlobalFlagsLock sync.Mutex
+
+// RemoveKlogGlobalFlags attempts to "remove" flags that get unconditionally added by importing klog.
+func RemoveKlogGlobalFlags() {
+// since we mess with global state, we need a lock to synchronize us when called in parallel during tests
+removeKlogGlobalFlagsLock.Lock()
+defer removeKlogGlobalFlagsLock.Unlock()
+
+// if this function starts to panic, it likely means that klog stopped mucking with global flags
+const globalLogFlushFlag = "log-flush-frequency"
+if err := pflag.CommandLine.MarkHidden(globalLogFlushFlag); err != nil {
+panic(err)
+}
+if err := pflag.CommandLine.MarkDeprecated(globalLogFlushFlag, "unsupported"); err != nil {
+panic(err)
+}
+if pflag.CommandLine.Changed(globalLogFlushFlag) {
+panic("unsupported global klog flag set")
+}
+}
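A sketch of how the CLI commands in this diff use RemoveKlogGlobalFlags: call it once while wiring up a cobra command so the klog-registered --log-flush-frequency flag is hidden and marked deprecated in --help output (the command itself is hypothetical; compare the get-kubeconfig and exchange-credential changes above):

```go
package example

import (
	"github.com/spf13/cobra"

	"go.pinniped.dev/internal/plog"
)

// newExampleCommand is for illustration only.
func newExampleCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "example",
		Short: "Hypothetical command used to illustrate RemoveKlogGlobalFlags",
		RunE: func(cmd *cobra.Command, args []string) error {
			return nil
		},
	}

	// Hide/deprecate the global klog flag so it does not clutter help output.
	plog.RemoveKlogGlobalFlags()

	return cmd
}
```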
internal/plog/level.go (new file, 64 lines)
@@ -0,0 +1,64 @@
+// Copyright 2020 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package plog
+
+import (
+"strconv"
+
+"k8s.io/component-base/logs"
+
+"go.pinniped.dev/internal/constable"
+)
+
+// LogLevel is an enum that controls verbosity of logs.
+// Valid values in order of increasing verbosity are leaving it unset, info, debug, trace and all.
+type LogLevel string
+
+const (
+// LevelWarning (i.e. leaving the log level unset) maps to klog log level 0.
+LevelWarning LogLevel = ""
+// LevelInfo maps to klog log level 2.
+LevelInfo LogLevel = "info"
+// LevelDebug maps to klog log level 4.
+LevelDebug LogLevel = "debug"
+// LevelTrace maps to klog log level 6.
+LevelTrace LogLevel = "trace"
+// LevelAll maps to klog log level 100 (conceptually it is log level 8).
+LevelAll LogLevel = "all"
+
+errInvalidLogLevel = constable.Error("invalid log level, valid choices are the empty string, info, debug, trace and all")
+)
+
+const (
+klogLevelWarning = iota * 2
+klogLevelInfo
+klogLevelDebug
+klogLevelTrace
+klogLevelAll
+)
+
+func ValidateAndSetLogLevelGlobally(level LogLevel) error {
+var klogLogLevel int
+
+switch level {
+case LevelWarning:
+klogLogLevel = klogLevelWarning // unset means minimal logs (Error and Warning)
+case LevelInfo:
+klogLogLevel = klogLevelInfo
+case LevelDebug:
+klogLogLevel = klogLevelDebug
+case LevelTrace:
+klogLogLevel = klogLevelTrace
+case LevelAll:
+klogLogLevel = klogLevelAll + 100 // make all really mean all
+default:
+return errInvalidLogLevel
+}
+
+if _, err := logs.GlogSetter(strconv.Itoa(klogLogLevel)); err != nil {
+panic(err) // programmer error
+}
+
+return nil
+}
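A short sketch of the mapping implemented above (warning=0, info=2, debug=4, trace=6, all=108, matching the table in level_test.go below): setting the level to debug opens the klog -v=4 gate but leaves trace output off.

```go
package example

import (
	"fmt"

	"k8s.io/klog/v2"

	"go.pinniped.dev/internal/plog"
)

// demonstrateLevels sets the global level to debug and checks which klog
// verbosity gates are open afterwards.
func demonstrateLevels() {
	if err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug); err != nil {
		panic(err)
	}

	fmt.Println(klog.V(4).Enabled()) // true: debug maps to klog -v=4
	fmt.Println(klog.V(6).Enabled()) // false: trace output stays off
}
```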
internal/plog/level_test.go (new file, 103 lines)
@@ -0,0 +1,103 @@
+// Copyright 2020 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package plog
+
+import (
+"strconv"
+"testing"
+
+"github.com/stretchr/testify/require"
+"k8s.io/component-base/logs"
+"k8s.io/klog/v2"
+)
+
+func TestValidateAndSetLogLevelGlobally(t *testing.T) {
+originalLogLevel := getKlogLevel(t)
+
+tests := []struct {
+name string
+level LogLevel
+wantLevel klog.Level
+wantErr string
+}{
+{
+name: "unset",
+wantLevel: 0,
+},
+{
+name: "warning",
+level: LevelWarning,
+wantLevel: 0,
+},
+{
+name: "info",
+level: LevelInfo,
+wantLevel: 2,
+},
+{
+name: "debug",
+level: LevelDebug,
+wantLevel: 4,
+},
+{
+name: "trace",
+level: LevelTrace,
+wantLevel: 6,
+},
+{
+name: "all",
+level: LevelAll,
+wantLevel: 108,
+},
+{
+name: "invalid level",
+level: "panda",
+wantLevel: originalLogLevel,
+wantErr: errInvalidLogLevel.Error(),
+},
+}
+for _, tt := range tests {
+tt := tt // capture range variable
+t.Run(tt.name, func(t *testing.T) {
+defer func() {
+undoGlobalLogLevelChanges(t, originalLogLevel)
+}()
+
+err := ValidateAndSetLogLevelGlobally(tt.level)
+require.Equal(t, tt.wantErr, errString(err))
+require.Equal(t, tt.wantLevel, getKlogLevel(t))
+})
+}
+
+require.Equal(t, originalLogLevel, getKlogLevel(t))
+}
+
+func getKlogLevel(t *testing.T) klog.Level {
+t.Helper()
+
+// hack around klog not exposing a Get method
+for i := klog.Level(0); i < 256; i++ {
+if klog.V(i).Enabled() {
+continue
+}
+return i - 1
+}
+
+t.Fatal("unknown log level")
+return 0
+}
+
+func errString(err error) string {
+if err == nil {
+return ""
+}
+
+return err.Error()
+}
+
+func undoGlobalLogLevelChanges(t *testing.T, originalLogLevel klog.Level) {
+t.Helper()
+_, err := logs.GlogSetter(strconv.Itoa(int(originalLogLevel)))
+require.NoError(t, err)
+}

internal/plog/plog.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+// Copyright 2020 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package plog
+
+import "k8s.io/klog/v2"
+
+func Error(err error, msg string, keysAndValues ...interface{}) {
+klog.ErrorS(err, msg, keysAndValues...)
+}
+
+func Warning(msg string, keysAndValues ...interface{}) {
+// klog's structured logging has no concept of a warning (i.e. no WarningS function)
+// Thus we use info at log level zero as a proxy
+// klog's info logs have an I prefix and its warning logs have a W prefix
+// Since we lose the W prefix by using InfoS, just add a key to make these easier to find
+keysAndValues = append([]interface{}{"warning", "true"}, keysAndValues...)
+klog.V(klogLevelWarning).InfoS(msg, keysAndValues...)
+}
+
+func Info(msg string, keysAndValues ...interface{}) {
+klog.V(klogLevelInfo).InfoS(msg, keysAndValues...)
+}
+
+func Debug(msg string, keysAndValues ...interface{}) {
+klog.V(klogLevelDebug).InfoS(msg, keysAndValues...)
+}
+
+func Trace(msg string, keysAndValues ...interface{}) {
+klog.V(klogLevelTrace).InfoS(msg, keysAndValues...)
+}
+
+func All(msg string, keysAndValues ...interface{}) {
+klog.V(klogLevelAll).InfoS(msg, keysAndValues...)
+}