diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index dbbe778c..0622b697 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -36,7 +36,7 @@ jobs: - name: Run golangci-lint run: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b . v1.59.1 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b . v1.60.1 ./golangci-lint run --sort-results --build-tags integration,test diff --git a/.golangci.yaml b/.golangci.yaml index 4ec3d5e1..a418e83a 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,7 +1,5 @@ run: timeout: 15m - skip-dirs: - - test/integration linters: disable-all: true enable: @@ -24,6 +22,7 @@ linters: - nolintlint - prealloc - revive + - sloglint - staticcheck - stylecheck - unconvert @@ -39,6 +38,33 @@ linters-settings: local-prefixes: github.com/flant/ nolintlint: allow-unused: true + sloglint: + # Enforce not mixing key-value pairs and attributes. + no-mixed-args: true + # Enforce using key-value pairs only (overrides no-mixed-args, incompatible with attr-only). + kv-only: false + # Enforce using attributes only (overrides no-mixed-args, incompatible with kv-only). + attr-only: false + # Enforce not using global loggers. + no-global: "" + # Enforce using methods that accept a context. + context: "" + # Enforce using static values for log messages. + static-msg: false + # Enforce using constants instead of raw keys. + no-raw-keys: false + # Enforce a single key naming convention. + key-naming-case: "" + # Enforce not using specific keys. + forbidden-keys: + - level + - msg + - logger + - source + - stacktrace + - time + # Enforce putting arguments on separate lines. + args-on-sep-lines: false depguard: rules: Main: @@ -48,6 +74,8 @@ linters-settings: - pkg: "gopkg.in/satori/go.uuid.v1" desc: "Use https://github.com/gofrs/uuid instead. Satori/go.uuid is no longer maintained and has critical vulnerabilities." 
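For readers unfamiliar with sloglint, the settings introduced above correspond to concrete log/slog call shapes; a minimal, generic illustration using the standard library slog (not this repo's logger) of what each option targets:

package main

import "log/slog"

func main() {
	logger := slog.Default()

	// Key-value pairs only (what kv-only: true would enforce).
	logger.Info("queue flushed", "queue", "main", "count", 3)

	// Attributes only (what attr-only: true would enforce).
	logger.Info("queue flushed", slog.String("queue", "main"), slog.Int("count", 3))

	// Mixing both styles in one call is what no-mixed-args: true reports.
	logger.Info("queue flushed", "queue", "main", slog.Int("count", 3))

	// Keys listed under forbidden-keys (level, msg, time, ...) collide with
	// the handler's built-in record fields, so sloglint would flag this one.
	logger.Info("queue flushed", "msg", "shadowed message key")
}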
issues: + exclude-dirs: + - test/integration exclude: # Using underscores is a common practice, refactor in the future - "var-naming: don't use an underscore in package name" # revive diff --git a/cmd/shell-operator/main.go b/cmd/shell-operator/main.go index c153f125..16f8a727 100644 --- a/cmd/shell-operator/main.go +++ b/cmd/shell-operator/main.go @@ -2,13 +2,12 @@ package main import ( "fmt" - "math/rand" "os" - "time" + "github.com/deckhouse/deckhouse/pkg/log" "gopkg.in/alecthomas/kingpin.v2" - "github.com/flant/kube-client/klogtologrus" + "github.com/flant/kube-client/klogtolog" "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/debug" "github.com/flant/shell-operator/pkg/jq" @@ -19,17 +18,20 @@ import ( func main() { kpApp := kingpin.New(app.AppName, fmt.Sprintf("%s %s: %s", app.AppName, app.Version, app.AppDescription)) + logger := log.NewLogger(log.Options{}) + log.SetDefault(logger) + // override usage template to reveal additional commands with information about start command kpApp.UsageTemplate(app.OperatorUsageTemplate(app.AppName)) // Initialize klog wrapper when all values are parsed - kpApp.Action(func(c *kingpin.ParseContext) error { - klogtologrus.InitAdapter(app.DebugKubernetesAPI) + kpApp.Action(func(_ *kingpin.ParseContext) error { + klogtolog.InitAdapter(app.DebugKubernetesAPI, logger.Named("klog")) return nil }) // print version - kpApp.Command("version", "Show version.").Action(func(c *kingpin.ParseContext) error { + kpApp.Command("version", "Show version.").Action(func(_ *kingpin.ParseContext) error { fmt.Printf("%s %s\n", app.AppName, app.Version) fmt.Println(jq.FilterInfo()) return nil @@ -38,14 +40,11 @@ func main() { // start main loop startCmd := kpApp.Command("start", "Start shell-operator."). Default(). - Action(func(c *kingpin.ParseContext) error { + Action(func(_ *kingpin.ParseContext) error { app.AppStartMessage = fmt.Sprintf("%s %s", app.AppName, app.Version) - // Init rand generator. - rand.Seed(time.Now().UnixNano()) - // Init logging and initialize a ShellOperator instance. 
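On the removed rand.Seed call: since Go 1.20 the global math/rand source is seeded automatically, and math/rand/v2 (which the executor test below switches to) has no Seed for its top-level functions at all, so the explicit seeding in main() is no longer needed. A minimal illustration:

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	// No rand.Seed(time.Now().UnixNano()) required: the generator is
	// already seeded, matching the deletion in cmd/shell-operator/main.go.
	fmt.Println(rand.IntN(10))
}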
- operator, err := shell_operator.Init() + operator, err := shell_operator.Init(logger.Named("shell-operator")) if err != nil { os.Exit(1) } diff --git a/go.mod b/go.mod index 1760e495..8775e512 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,10 @@ module github.com/flant/shell-operator -go 1.22 +go 1.22.8 require ( - github.com/flant/kube-client v1.2.0 + github.com/deckhouse/deckhouse/pkg/log v0.0.0-20241102120041-7e44e3e22ab9 + github.com/flant/kube-client v1.2.1 github.com/flant/libjq-go v1.6.3-0.20201126171326-c46a40ff22ee // branch: master github.com/go-chi/chi/v5 v5.1.0 github.com/go-openapi/spec v0.19.8 @@ -17,7 +18,6 @@ require ( github.com/onsi/gomega v1.34.2 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.20.5 - github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 golang.org/x/time v0.7.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 @@ -36,6 +36,7 @@ replace github.com/go-openapi/validate => github.com/flant/go-openapi-validate v require github.com/gojuno/minimock/v3 v3.4.0 require ( + github.com/DataDog/gostackparse v0.7.0 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect diff --git a/go.sum b/go.sum index df1e9159..d5e1270d 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= +github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -22,6 +24,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckhouse/deckhouse/pkg/log v0.0.0-20241102120041-7e44e3e22ab9 h1:gCV1iKcaE69x3Knzj8isyVyEtRuG2YI4nmLIzBHBWeQ= +github.com/deckhouse/deckhouse/pkg/log v0.0.0-20241102120041-7e44e3e22ab9/go.mod h1:Mk5HRzkc5pIcDIZ2JJ6DPuuqnwhXVkb3you8M8Mg+4w= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= @@ -29,8 +33,8 @@ github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCv github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/flant/go-openapi-validate v0.19.12-flant.0 h1:xk6kvc9fHKMgUdB6J7kbpbLR5vJOUzKAK8p3nrD7mDk= github.com/flant/go-openapi-validate v0.19.12-flant.0/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/flant/kube-client v1.2.0 h1:cOjnZgwoJVm4scqsMGcgO6qctJ+Z6wRVTFqvyhoAOiQ= -github.com/flant/kube-client v1.2.0/go.mod h1:qjNvqCqnBqS+kc9Hj+wMBbkFzfdyGyiX4r4teE/0Pm8= +github.com/flant/kube-client v1.2.1 
h1:8lHF8+wGZrhsXM6TeS3CNAPupXyx7UJLkBvNMpCe/ZI= +github.com/flant/kube-client v1.2.1/go.mod h1:62vCzrIUzWMU4DB0Pn6/02Pu7uwKh5/mHGWy2NmxZAk= github.com/flant/libjq-go v1.6.3-0.20201126171326-c46a40ff22ee h1:evii83J+/6QGNvyf6tjQ/p27DPY9iftxIBb37ALJRTg= github.com/flant/libjq-go v1.6.3-0.20201126171326-c46a40ff22ee/go.mod h1:f+REaGl/+pZR97rbTcwHEka/MAipoQQ2Mc0iQUj4ak0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -255,8 +259,6 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -341,7 +343,6 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/pkg/app/app.go b/pkg/app/app.go index 24318705..f0edf74d 100644 --- a/pkg/app/app.go +++ b/pkg/app/app.go @@ -148,7 +148,7 @@ Use "%s debug-options" for a list of debug options for start command. // CommandWithDefaultUsageTemplate is used to workaround an absence of per-command usage templates func CommandWithDefaultUsageTemplate(kpApp *kingpin.Application, name, help string) *kingpin.CmdClause { - return kpApp.Command(name, help).PreAction(func(context *kingpin.ParseContext) error { + return kpApp.Command(name, help).PreAction(func(_ *kingpin.ParseContext) error { kpApp.UsageTemplate(kingpin.DefaultUsageTemplate) return nil }) diff --git a/pkg/app/log.go b/pkg/app/log.go index a0a50794..88bfe80e 100644 --- a/pkg/app/log.go +++ b/pkg/app/log.go @@ -1,12 +1,11 @@ package app import ( - "bytes" "fmt" "strings" "time" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "gopkg.in/alecthomas/kingpin.v2" "github.com/flant/shell-operator/pkg/config" @@ -45,35 +44,36 @@ func DefineLoggingFlags(cmd *kingpin.CmdClause) { } // SetupLogging sets logger formatter and level. 
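Pulling the new logging wiring together in one place — a condensed sketch that uses only calls appearing elsewhere in this patch (the deckhouse/pkg/log API is assumed from those call sites, not documented here):

package main

import (
	"github.com/deckhouse/deckhouse/pkg/log"
)

func main() {
	logger := log.NewLogger(log.Options{})
	log.SetDefault(logger)

	// Named child loggers replace logrus' WithField("component", ...) chains.
	cfgLogger := logger.Named("runtimeConfig")
	cfgLogger.Infof("Set log level to '%s'", "debug")

	// Runtime level changes go through the package-level default,
	// as SetupLogging below now does.
	log.SetDefaultLevel(log.LogLevelFromStr("debug"))
}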
-func SetupLogging(runtimeConfig *config.Config) { - jsonFormatter := log.JSONFormatter{DisableTimestamp: LogNoTime} - textFormatter := log.TextFormatter{DisableTimestamp: LogNoTime, DisableColors: true} - colorFormatter := log.TextFormatter{DisableTimestamp: LogNoTime, ForceColors: true, FullTimestamp: true} - switch strings.ToLower(LogType) { - case "json": - log.SetFormatter(&jsonFormatter) - case "text": - log.SetFormatter(&textFormatter) - case "color": - log.SetFormatter(&colorFormatter) - default: - log.SetFormatter(&jsonFormatter) - } - if LogProxyHookJSON { - formatter := log.StandardLogger().Formatter - log.SetFormatter(&ProxyJsonWrapperFormatter{WrappedFormatter: formatter}) - } +func SetupLogging(runtimeConfig *config.Config, logger *log.Logger) { + // TODO: if we need formatters - add to logger + // jsonFormatter := log.JSONFormatter{DisableTimestamp: LogNoTime} + // textFormatter := log.TextFormatter{DisableTimestamp: LogNoTime, DisableColors: true} + // colorFormatter := log.TextFormatter{DisableTimestamp: LogNoTime, ForceColors: true, FullTimestamp: true} + // switch strings.ToLower(LogType) { + // case "json": + // log.SetFormatter(&jsonFormatter) + // case "text": + // log.SetFormatter(&textFormatter) + // case "color": + // log.SetFormatter(&colorFormatter) + // default: + // log.SetFormatter(&jsonFormatter) + // } + // if LogProxyHookJSON { + // formatter := log.StandardLogger().Formatter + // log.SetFormatter(&ProxyJsonWrapperFormatter{WrappedFormatter: formatter}) + // } - setLogLevel(LogLevel) + log.SetDefaultLevel(log.LogLevelFromStr(LogLevel)) runtimeConfig.Register("log.level", fmt.Sprintf("Global log level. Default duration for debug level is %s", ForcedDurationForDebugLevel), strings.ToLower(LogLevel), - func(oldValue string, newValue string) error { - log.Infof("Set log level to '%s'", newValue) - setLogLevel(newValue) + func(_ string, newValue string) error { + logger.Infof("Set log level to '%s'", newValue) + log.SetDefaultLevel(log.LogLevelFromStr(newValue)) return nil - }, func(oldValue string, newValue string) time.Duration { + }, func(_ string, newValue string) time.Duration { if strings.ToLower(newValue) == "debug" { return ForcedDurationForDebugLevel } @@ -81,32 +81,19 @@ func SetupLogging(runtimeConfig *config.Config) { }) } -func setLogLevel(logLevel string) { - switch strings.ToLower(logLevel) { - case "debug": - log.SetLevel(log.DebugLevel) - case "error": - log.SetLevel(log.ErrorLevel) - case "info": - log.SetLevel(log.InfoLevel) - default: - log.SetLevel(log.InfoLevel) - } -} - -type ProxyJsonWrapperFormatter struct { - WrappedFormatter log.Formatter -} +// type ProxyJsonWrapperFormatter struct { +// WrappedFormatter log.Formatter +// } -func (f *ProxyJsonWrapperFormatter) Format(entry *log.Entry) ([]byte, error) { - // if proxying the json message is intended, just return the bytes - // TODO: Find a more elegant way to carry this info - if entry.Data[ProxyJsonLogKey] == true { - b := bytes.NewBufferString(entry.Message) - b.WriteString("\n") - return b.Bytes(), nil - } +// func (f *ProxyJsonWrapperFormatter) Format(entry *log.Entry) ([]byte, error) { +// // if proxying the json message is intended, just return the bytes +// // TODO: Find a more elegant way to carry this info +// if entry.Data[ProxyJsonLogKey] == true { +// b := bytes.NewBufferString(entry.Message) +// b.WriteString("\n") +// return b.Bytes(), nil +// } - // otherwise, use the wrapped formatter - return f.WrappedFormatter.Format(entry) -} +// // otherwise, use the wrapped 
formatter +// return f.WrappedFormatter.Format(entry) +// } diff --git a/pkg/config/config.go b/pkg/config/config.go index 0981c609..bb5e493c 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -2,12 +2,13 @@ package config import ( "fmt" + "log/slog" "sort" "strings" "sync" "time" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" ) /** @@ -42,16 +43,16 @@ type Config struct { temporalValues map[string]*TemporalValue expireTicker *time.Ticker - logEntry *log.Entry + logger *log.Logger } -func NewConfig() *Config { +func NewConfig(logger *log.Logger) *Config { return &Config{ params: make(map[string]*Parameter), values: make(map[string]string), temporalValues: make(map[string]*TemporalValue), errors: make(map[string]error), - logEntry: log.WithField("component", "runtimeConfig"), + logger: logger.With(slog.String("component", "runtimeConfig")), } } @@ -254,7 +255,7 @@ func (c *Config) expireOverrides() { for _, expire := range expires { name, oldValue, newValue := expire[0], expire[1], expire[2] - c.logEntry.Debugf("Parameter '%s' expired", name) + c.logger.Debug("Parameter is expired", slog.String("parameter", name)) c.callOnChange(name, oldValue, newValue) } } @@ -266,8 +267,8 @@ func (c *Config) callOnChange(name string, oldValue string, newValue string) { } err := c.params[name].onChange(oldValue, newValue) if err != nil { - c.logEntry.Errorf("OnChange handler failed for '%s' during value change from '%s' to '%s': %v", - name, oldValue, newValue, err) + c.logger.Error("OnChange handler failed for parameter during value change values", + slog.String("parameter", name), slog.String("old_value", oldValue), slog.String("new_value", newValue), slog.String("error", err.Error())) } c.m.Lock() delete(c.errors, name) diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 35033a72..4261f44b 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/stretchr/testify/assert" ) func TestConfig_Register(t *testing.T) { - c := NewConfig() + c := NewConfig(log.NewNop()) c.Register("log.level", "", "info", nil, nil) @@ -32,10 +33,10 @@ func TestConfig_Register(t *testing.T) { } func TestConfig_OnChange(t *testing.T) { - c := NewConfig() + c := NewConfig(log.NewNop()) newValue := "" - c.Register("log.level", "", "info", func(oldValue string, n string) error { + c.Register("log.level", "", "info", func(_ string, n string) error { newValue = n return nil }, nil) @@ -66,9 +67,9 @@ func TestConfig_OnChange(t *testing.T) { func TestConfig_Errors(t *testing.T) { var err error - c := NewConfig() + c := NewConfig(log.NewNop()) - c.Register("log.level", "", "info", func(oldValue string, n string) error { + c.Register("log.level", "", "info", func(_ string, n string) error { if n == "debug" { return nil } diff --git a/pkg/debug/debug-cmd.go b/pkg/debug/debug-cmd.go index 07f8ddd7..702a6d12 100644 --- a/pkg/debug/debug-cmd.go +++ b/pkg/debug/debug-cmd.go @@ -19,7 +19,7 @@ func DefineDebugCommands(kpApp *kingpin.Application) { queueCmd := app.CommandWithDefaultUsageTemplate(kpApp, "queue", "Dump queues.") queueListCmd := queueCmd.Command("list", "Dump tasks in all queues."). 
- Action(func(c *kingpin.ParseContext) error { + Action(func(_ *kingpin.ParseContext) error { out, err := Queue(DefaultClient()).List(outputFormat, showEmpty) if err != nil { return err @@ -34,7 +34,7 @@ func DefineDebugCommands(kpApp *kingpin.Application) { app.DefineDebugUnixSocketFlag(queueListCmd) queueMainCmd := queueCmd.Command("main", "Dump tasks in the main queue."). - Action(func(c *kingpin.ParseContext) error { + Action(func(_ *kingpin.ParseContext) error { out, err := Queue(DefaultClient()).Main(outputFormat) if err != nil { return err @@ -49,7 +49,7 @@ func DefineDebugCommands(kpApp *kingpin.Application) { configCmd := app.CommandWithDefaultUsageTemplate(kpApp, "config", "Manage runtime parameters.") configListCmd := configCmd.Command("list", "List available runtime parameters."). - Action(func(c *kingpin.ParseContext) error { + Action(func(_ *kingpin.ParseContext) error { out, err := Config(DefaultClient()).List(outputFormat) if err != nil { return err @@ -64,7 +64,7 @@ func DefineDebugCommands(kpApp *kingpin.Application) { var paramValue string var paramDuration time.Duration configSetCmd := configCmd.Command("set", "Set runtime parameter."). - Action(func(c *kingpin.ParseContext) error { + Action(func(_ *kingpin.ParseContext) error { out, err := Config(DefaultClient()).Set(paramName, paramValue, paramDuration) if err != nil { return err @@ -80,7 +80,7 @@ func DefineDebugCommands(kpApp *kingpin.Application) { // Raw request command var rawUrl string rawCommand := app.CommandWithDefaultUsageTemplate(kpApp, "raw", "Make a raw request to debug endpoint."). - Action(func(c *kingpin.ParseContext) error { + Action(func(_ *kingpin.ParseContext) error { url := fmt.Sprintf("http://unix%s", rawUrl) resp, err := DefaultClient().Get(url) if err != nil { @@ -97,7 +97,7 @@ func DefineDebugCommandsSelf(kpApp *kingpin.Application) { // Get hook names hookCmd := app.CommandWithDefaultUsageTemplate(kpApp, "hook", "Actions for hooks") hookListCmd := hookCmd.Command("list", "List all hooks."). - Action(func(c *kingpin.ParseContext) error { + Action(func(_ *kingpin.ParseContext) error { outBytes, err := Hook(DefaultClient()).List(outputFormat) if err != nil { return err @@ -111,7 +111,7 @@ func DefineDebugCommandsSelf(kpApp *kingpin.Application) { // Get hook snapshots var hookName string hookSnapshotCmd := hookCmd.Command("snapshot", "Dump hook snapshots."). 
- Action(func(c *kingpin.ParseContext) error { + Action(func(_ *kingpin.ParseContext) error { outBytes, err := Hook(DefaultClient()).Name(hookName).Snapshots(outputFormat) if err != nil { return err diff --git a/pkg/debug/server.go b/pkg/debug/server.go index ace07e40..65e2c06c 100644 --- a/pkg/debug/server.go +++ b/pkg/debug/server.go @@ -9,13 +9,13 @@ import ( "os" "path" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" - log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" utils "github.com/flant/shell-operator/pkg/utils/file" - structured_logger "github.com/flant/shell-operator/pkg/utils/structured-logger" + structuredLogger "github.com/flant/shell-operator/pkg/utils/structured-logger" ) type Server struct { @@ -24,11 +24,14 @@ type Server struct { HttpAddr string Router chi.Router + + logger *log.Logger } -func NewServer(prefix, socketPath, httpAddr string) *Server { +func NewServer(prefix, socketPath, httpAddr string, logger *log.Logger) *Server { router := chi.NewRouter() - router.Use(structured_logger.NewStructuredLogger(log.StandardLogger(), "debugEndpoint")) + + router.Use(structuredLogger.NewStructuredLogger(logger.Named("debugEndpoint"), "debugEndpoint")) router.Use(middleware.Recoverer) return &Server{ @@ -36,6 +39,7 @@ func NewServer(prefix, socketPath, httpAddr string) *Server { SocketPath: socketPath, HttpAddr: httpAddr, Router: router, + logger: logger, } } @@ -44,19 +48,19 @@ func (s *Server) Init() (err error) { err = os.MkdirAll(path.Dir(address), 0o700) if err != nil { - log.Errorf("Debug HTTP server fail to create socket '%s': %v", address, err) + s.logger.Errorf("Debug HTTP server fail to create socket '%s': %v", address, err) return err } exists, err := utils.FileExists(address) if err != nil { - log.Errorf("Debug HTTP server fail to check socket '%s': %v", address, err) + s.logger.Errorf("Debug HTTP server fail to check socket '%s': %v", address, err) return err } if exists { err = os.Remove(address) if err != nil { - log.Errorf("Debug HTTP server fail to remove existing socket '%s': %v", address, err) + s.logger.Errorf("Debug HTTP server fail to remove existing socket '%s': %v", address, err) return err } } @@ -64,15 +68,15 @@ func (s *Server) Init() (err error) { // Check if socket is available listener, err := net.Listen("unix", address) if err != nil { - log.Errorf("Debug HTTP server fail to listen on '%s': %v", address, err) + s.logger.Errorf("Debug HTTP server fail to listen on '%s': %v", address, err) return err } - log.Infof("Debug endpoint listen on %s", address) + s.logger.Infof("Debug endpoint listen on %s", address) go func() { if err := http.Serve(listener, s.Router); err != nil { - log.Errorf("Error starting Debug socket server: %s", err) + s.logger.Errorf("Error starting Debug socket server: %s", err) os.Exit(1) } }() @@ -80,7 +84,7 @@ func (s *Server) Init() (err error) { if s.HttpAddr != "" { go func() { if err := http.ListenAndServe(s.HttpAddr, s.Router); err != nil { - log.Errorf("Error starting Debug HTTP server: %s", err) + s.logger.Errorf("Error starting Debug HTTP server: %s", err) os.Exit(1) } }() @@ -131,7 +135,7 @@ func handleFormattedOutput(writer http.ResponseWriter, request *http.Request, ha } format := FormatFromRequest(request) - structured_logger.GetLogEntry(request).Debugf("use format '%s'", format) + structuredLogger.GetLogEntry(request).Debugf("use format '%s'", format) switch format { case "text": diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index 
e62b7b81..68cda40e 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -3,15 +3,17 @@ package executor import ( "bufio" "bytes" + "context" "encoding/json" "fmt" "io" + "log/slog" "os/exec" "strings" "syscall" "time" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/shell-operator/pkg/app" utils "github.com/flant/shell-operator/pkg/utils/labels" @@ -31,17 +33,17 @@ func Run(cmd *exec.Cmd) error { return cmd.Run() } -func RunAndLogLines(cmd *exec.Cmd, logLabels map[string]string) (*CmdUsage, error) { +func RunAndLogLines(cmd *exec.Cmd, logLabels map[string]string, logger *log.Logger) (*CmdUsage, error) { // TODO observability stdErr := bytes.NewBuffer(nil) - logEntry := log.WithFields(utils.LabelsToLogFields(logLabels)) - stdoutLogEntry := logEntry.WithField("output", "stdout") - stderrLogEntry := logEntry.WithField("output", "stderr") + logEntry := utils.EnrichLoggerWithLabels(logger, logLabels) + stdoutLogEntry := logEntry.With("output", "stdout") + stderrLogEntry := logEntry.With("output", "stderr") logEntry.Debugf("Executing command '%s' in '%s' dir", strings.Join(cmd.Args, " "), cmd.Dir) - plo := &proxyJSONLogger{stdoutLogEntry, make([]byte, 0), app.LogProxyHookJSON} - ple := &proxyJSONLogger{stderrLogEntry, make([]byte, 0), app.LogProxyHookJSON} + plo := &proxyLogger{app.LogProxyHookJSON, stdoutLogEntry, make([]byte, 0)} + ple := &proxyLogger{app.LogProxyHookJSON, stderrLogEntry, make([]byte, 0)} cmd.Stdout = plo cmd.Stderr = io.MultiWriter(ple, stdErr) @@ -50,6 +52,7 @@ func RunAndLogLines(cmd *exec.Cmd, logLabels map[string]string) (*CmdUsage, erro if len(stdErr.Bytes()) > 0 { return nil, fmt.Errorf("%s", stdErr.String()) } + return nil, err } @@ -59,6 +62,7 @@ func RunAndLogLines(cmd *exec.Cmd, logLabels map[string]string) (*CmdUsage, erro Sys: cmd.ProcessState.SystemTime(), User: cmd.ProcessState.UserTime(), } + // FIXME Maxrss is Unix specific. sysUsage := cmd.ProcessState.SysUsage() if v, ok := sysUsage.(*syscall.Rusage); ok { @@ -70,67 +74,70 @@ func RunAndLogLines(cmd *exec.Cmd, logLabels map[string]string) (*CmdUsage, erro return usage, err } -type proxyJSONLogger struct { - *log.Entry +type proxyLogger struct { + logProxyHookJSON bool - buf []byte + logger *log.Logger - logProxyHookJSON bool + buf []byte } -func (pj *proxyJSONLogger) Write(p []byte) (int, error) { - if !pj.logProxyHookJSON { - pj.writerScanner(p) +func (pl *proxyLogger) Write(p []byte) (int, error) { + if !pl.logProxyHookJSON { + pl.writerScanner(p) return len(p), nil } // join all parts of json - pj.buf = append(pj.buf, p...) + pl.buf = append(pl.buf, p...) 
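	// A hook's JSON output may arrive across several Write calls, so the bytes
	// are accumulated here and parsed below; an "unexpected end of JSON input"
	// error only means the document is still incomplete and more writes are
	// expected before it is re-emitted through the structured logger.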
var line interface{} - err := json.Unmarshal(pj.buf, &line) + err := json.Unmarshal(pl.buf, &line) if err != nil { if err.Error() == "unexpected end of JSON input" { return len(p), nil } + return len(p), err } logMap, ok := line.(map[string]interface{}) + defer func() { + pl.buf = []byte{} + }() + if !ok { - pj.Debugf("json log line not map[string]interface{}: %v", line) + pl.logger.Debug("json log line not map[string]interface{}", slog.Any("line", line)) + // fall back to using the logger - pj.Info(string(p)) + pl.logger.Info(string(p)) + return len(p), err } - for k, v := range pj.Data { - logMap[k] = v - } + logger := pl.logger.With(app.ProxyJsonLogKey, true) logLineRaw, _ := json.Marshal(logMap) - logLine := string(logLineRaw) - logEntry := pj.WithField(app.ProxyJsonLogKey, true) - if len(logLine) > 10000 { - logLine = fmt.Sprintf("%s:truncated", string(logLine[:10000])) + logLine = fmt.Sprintf("%s:truncated", logLine[:10000]) - truncatedLog, _ := json.Marshal(map[string]string{ + logger.Log(context.Background(), log.LevelFatal.Level(), "hook result", slog.Any("hook", map[string]any{ "truncated": logLine, - }) + })) - logEntry.Log(log.FatalLevel, string(truncatedLog)) + return len(p), nil } - logEntry.Log(log.FatalLevel, string(logLine)) + // logEntry.Log(log.FatalLevel, string(logLine)) + logger.Log(context.Background(), log.LevelFatal.Level(), "hook result", slog.Any("hook", logMap)) return len(p), nil } -func (pj *proxyJSONLogger) writerScanner(p []byte) { +func (pl *proxyLogger) writerScanner(p []byte) { scanner := bufio.NewScanner(bytes.NewReader(p)) // Set the buffer size to the maximum token size to avoid buffer overflows @@ -161,12 +168,12 @@ func (pj *proxyJSONLogger) writerScanner(p []byte) { str = fmt.Sprintf("%s:truncated", str[:10000]) } - pj.Entry.Info(str) + pl.logger.Info(str) } // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { - pj.Entry.Errorf("Error while reading from Writer: %s", err) + pl.logger.Error("reading from scanner", slog.String("error", err.Error())) } } diff --git a/pkg/executor/executor_test.go b/pkg/executor/executor_test.go index a3cae4d3..0a9d72d8 100644 --- a/pkg/executor/executor_test.go +++ b/pkg/executor/executor_test.go @@ -3,13 +3,14 @@ package executor import ( "bytes" "io" - "math/rand" + "math/rand/v2" "os" "os/exec" + "regexp" "testing" "time" - "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,68 +18,110 @@ import ( ) func TestRunAndLogLines(t *testing.T) { + loggerOpts := log.Options{ + TimeFunc: func(_ time.Time) time.Time { + parsedTime, err := time.Parse(time.DateTime, "2006-01-02 15:04:05") + if err != nil { + assert.NoError(t, err) + } + + return parsedTime + }, + } + logger := log.NewLogger(loggerOpts) + logger.SetLevel(log.LevelInfo) + var buf bytes.Buffer - logrus.SetLevel(logrus.DebugLevel) - logrus.SetOutput(&buf) + logger.SetOutput(&buf) t.Run("simple log", func(t *testing.T) { app.LogProxyHookJSON = true - // time="2023-07-10T18:13:42+04:00" level=fatal msg="{\"a\":\"b\",\"foo\":\"baz\",\"output\":\"stdout\"}" a=b output=stdout proxyJsonLog=true + cmd := exec.Command("echo", `{"foo": "baz"}`) - _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}) + + _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) assert.NoError(t, err) - assert.Contains(t, buf.String(), `level=fatal msg="{\"a\":\"b\",\"foo\":\"baz\",\"output\":\"stdout\"}" a=b output=stdout 
proxyJsonLog=true`) + + assert.Equal(t, buf.String(), `{"level":"fatal","msg":"hook result","a":"b","hook":{"foo":"baz"},"output":"stdout","proxyJsonLog":true,"time":"2006-01-02T15:04:05Z"}`+"\n") buf.Reset() }) t.Run("not json log", func(t *testing.T) { app.LogProxyHookJSON = false - // time="2023-07-10T18:14:25+04:00" level=info msg=foobar a=b output=stdout cmd := exec.Command("echo", `foobar`) - _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}) - time.Sleep(100 * time.Millisecond) + + _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) assert.NoError(t, err) - assert.Contains(t, buf.String(), `level=info msg=foobar a=b output=stdout`) + + assert.Equal(t, buf.String(), `{"level":"info","msg":"foobar","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n") buf.Reset() }) - t.Run("long file", func(t *testing.T) { + t.Run("long file must be truncated", func(t *testing.T) { f, err := os.CreateTemp(os.TempDir(), "testjson-*.json") require.NoError(t, err) + defer os.RemoveAll(f.Name()) _, _ = io.WriteString(f, `{"foo": "`+randStringRunes(1024*1024)+`"}`) app.LogProxyHookJSON = true cmd := exec.Command("cat", f.Name()) - _, err = RunAndLogLines(cmd, map[string]string{"a": "b"}) + + _, err = RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) assert.NoError(t, err) - assert.Contains(t, buf.String(), `:truncated\"}" a=b output=stdout proxyJsonLog=true`) + + reg := regexp.MustCompile(`{"level":"fatal","msg":"hook result","a":"b","hook":{"truncated":".*:truncated"},"output":"stdout","proxyJsonLog":true,"time":"2006-01-02T15:04:05Z"`) + assert.Regexp(t, reg, buf.String()) + + buf.Reset() + }) + + t.Run("long file non json must be truncated", func(t *testing.T) { + f, err := os.CreateTemp(os.TempDir(), "testjson-*.json") + require.NoError(t, err) + + defer os.RemoveAll(f.Name()) + + _, _ = io.WriteString(f, `result `+randStringRunes(1024*1024)) + + app.LogProxyHookJSON = false + cmd := exec.Command("cat", f.Name()) + + _, err = RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) + assert.NoError(t, err) + + reg := regexp.MustCompile(`{"level":"info","msg":"result .*:truncated","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"`) + assert.Regexp(t, reg, buf.String()) buf.Reset() }) t.Run("invalid json structure", func(t *testing.T) { + logger.SetLevel(log.LevelDebug) app.LogProxyHookJSON = true cmd := exec.Command("echo", `["a","b","c"]`) - _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}) + _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) assert.NoError(t, err) - assert.Contains(t, buf.String(), `level=debug msg="json log line not map[string]interface{}: [a b c]" a=b output=stdout`) + assert.Equal(t, buf.String(), `{"level":"debug","msg":"Executing command 'echo [\"a\",\"b\",\"c\"]' in '' dir","source":"executor/executor.go:43","a":"b","time":"2006-01-02T15:04:05Z"}`+"\n"+ + `{"level":"debug","msg":"json log line not map[string]interface{}","source":"executor/executor.go:111","a":"b","line":["a","b","c"],"output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n"+ + `{"level":"info","msg":"[\"a\",\"b\",\"c\"]\n","source":"executor/executor.go:114","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n") buf.Reset() }) t.Run("multiline", func(t *testing.T) { + logger.SetLevel(log.LevelInfo) app.LogProxyHookJSON = true cmd := exec.Command("echo", ` {"a":"b", "c":"d"} `) - _, err := RunAndLogLines(cmd, map[string]string{"foor": "baar"}) + _, err := RunAndLogLines(cmd, map[string]string{"foor": "baar"}, logger) assert.NoError(t, 
err) - assert.Contains(t, buf.String(), `msg="{\"a\":\"b\",\"c\":\"d\",\"foor\":\"baar\",\"output\":\"stdout\"}" foor=baar output=stdout proxyJsonLog=true`) + assert.Equal(t, buf.String(), `{"level":"fatal","msg":"hook result","foor":"baar","hook":{"a":"b","c":"d"},"output":"stdout","proxyJsonLog":true,"time":"2006-01-02T15:04:05Z"}`+"\n") buf.Reset() }) @@ -89,10 +132,23 @@ func TestRunAndLogLines(t *testing.T) { a b c d `) - _, err := RunAndLogLines(cmd, map[string]string{"foor": "baar"}) + _, err := RunAndLogLines(cmd, map[string]string{"foor": "baar"}, logger) + assert.NoError(t, err) + assert.Equal(t, buf.String(), `{"level":"info","msg":"a b","foor":"baar","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n"+ + `{"level":"info","msg":"c d","foor":"baar","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n") + + buf.Reset() + }) + + t.Run("multiline json", func(t *testing.T) { + app.LogProxyHookJSON = true + cmd := exec.Command("echo", `{ +"a":"b", +"c":"d" +}`) + _, err := RunAndLogLines(cmd, map[string]string{"foor": "baar"}, logger) assert.NoError(t, err) - assert.Contains(t, buf.String(), `level=info msg="a b" foor=baar output=stdout`) - assert.Contains(t, buf.String(), `level=info msg="c d" foor=baar output=stdout`) + assert.Equal(t, buf.String(), `{"level":"fatal","msg":"hook result","foor":"baar","hook":{"a":"b","c":"d"},"output":"stdout","proxyJsonLog":true,"time":"2006-01-02T15:04:05Z"}`+"\n") buf.Reset() }) @@ -103,7 +159,7 @@ var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") func randStringRunes(n int) string { b := make([]rune, n) for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] + b[i] = letterRunes[rand.IntN(len(letterRunes))] } return string(b) } diff --git a/pkg/hook/binding_context/binding_context.go b/pkg/hook/binding_context/binding_context.go index 1821ea67..126b5b99 100644 --- a/pkg/hook/binding_context/binding_context.go +++ b/pkg/hook/binding_context/binding_context.go @@ -2,7 +2,8 @@ package binding_context import ( "encoding/json" - log "github.com/sirupsen/logrus" + + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/api/admission/v1" apixv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" diff --git a/pkg/hook/binding_context/binding_context_test.go b/pkg/hook/binding_context/binding_context_test.go index 034f6460..efba5588 100644 --- a/pkg/hook/binding_context/binding_context_test.go +++ b/pkg/hook/binding_context/binding_context_test.go @@ -1,486 +1,489 @@ package binding_context -import ( - "testing" - - . "github.com/onsi/gomega" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - . "github.com/flant/libjq-go" - . "github.com/flant/shell-operator/pkg/hook/types" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" -) - -func JqEqual(t *testing.T, input []byte, program string, expected string) { - //nolint:typecheck // Ignore false positive: undeclared name: `Jq`. - res, err := Jq().Program(program).Run(string(input)) - if assert.NoError(t, err) { - assert.Equal(t, expected, res, "jq: '%s', json was '%s'", program, string(input)) - } -} - -// Test conversion of BindingContext for v1, also test json marshal of v1 binding contexts. 
-func Test_ConvertBindingContextList_v1(t *testing.T) { - g := NewWithT(t) - - var bcList BindingContextList - var bcJson []byte - - tests := []struct { - name string - bc func() []BindingContext - fn func() - jqAssertions [][]string - }{ - { - "OnStartup binding", - func() []BindingContext { - bc := BindingContext{ - Binding: "onStartup", - } - bc.Metadata.BindingType = OnStartup - return []BindingContext{bc} - }, - func() { - assert.Equal(t, "onStartup", bcList[0]["binding"]) - }, - [][]string{ - {`.[0].binding`, `"onStartup"`}, - {`.[0] | length`, `1`}, - }, - }, - { - "kubernetes Event binding", - func() []BindingContext { - bc := BindingContext{ - Binding: "kubernetes", - Type: TypeEvent, - WatchEvent: WatchEventAdded, - Objects: []ObjectAndFilterResult{ - { - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "pod-qwe", - }, - "kind": "Pod", - }, - }, - }, - }, - } - bc.Metadata.BindingType = OnKubernetesEvent - return []BindingContext{bc} - }, - - func() { - assert.Len(t, bcList[0], 4) - assert.Equal(t, "kubernetes", bcList[0]["binding"]) - assert.Equal(t, TypeEvent, bcList[0]["type"]) - assert.Equal(t, "Added", bcList[0]["watchEvent"]) - assert.IsType(t, &unstructured.Unstructured{}, bcList[0]["object"]) - assert.Contains(t, bcList[0]["object"].(*unstructured.Unstructured).Object, "metadata") - }, - [][]string{ - // JSON dump should has only 4 fields: binding, type, watchEvent and object. - {`.[0] | length`, `4`}, - {`.[0].binding`, `"kubernetes"`}, - {`.[0].type`, `"Event"`}, - {`.[0].watchEvent`, `"Added"`}, - {`.[0].object.metadata.namespace`, `"default"`}, - {`.[0].object.metadata.name`, `"pod-qwe"`}, - }, - }, - { - "kubernetes Synchronization event", - func() []BindingContext { - bc := BindingContext{ - Binding: "kubernetes", - Type: TypeSynchronization, - Objects: []ObjectAndFilterResult{}, - } - bc.Metadata.BindingType = OnKubernetesEvent - - obj := ObjectAndFilterResult{ - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "pod-qwe", - }, - "kind": "Pod", - }, - }, - } - obj.Metadata.JqFilter = "" - bc.Objects = append(bc.Objects, obj) - - // object with jqfilter should have filterResult field - obj = ObjectAndFilterResult{ - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "deployment-test", - }, - "kind": "Deployment", - }, - }, - FilterResult: `{"labels":{"label-name":"label-value"}}`, - } - obj.Metadata.JqFilter = ".metadata.labels" - bc.Objects = append(bc.Objects, obj) - - // object with jqfilter and with empty result should have filterResult field - obj = ObjectAndFilterResult{ - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "deployment-2", - }, - "kind": "Deployment", - }, - }, - FilterResult: `""`, - } - obj.Metadata.JqFilter = ".metadata.labels" - bc.Objects = append(bc.Objects, obj) - - return []BindingContext{bc} - }, - func() { - assert.Len(t, bcList[0], 3) - assert.Len(t, bcList[0]["objects"], 3) - assert.Equal(t, TypeSynchronization, bcList[0]["type"]) - assert.Equal(t, "kubernetes", bcList[0]["binding"]) - assert.NotContains(t, bcList[0], "object") - assert.NotContains(t, bcList[0], "filterResult") - }, - [][]string{ - // JSON dump should contains n fields: binding, type and objects - 
{`.[0] | length`, `3`}, - {`.[0].binding`, `"kubernetes"`}, - {`.[0].type`, `"Synchronization"`}, - - {`.[0].objects[] | select(.object.metadata.name == "pod-qwe") | has("filterResult")`, `false`}, - {`.[0].objects[] | select(.object.metadata.name == "deployment-test") | has("filterResult")`, `true`}, - {`.[0].objects[] | select(.object.metadata.name == "deployment-test") | .filterResult.labels."label-name"`, `"label-value"`}, - {`.[0].objects[] | select(.object.metadata.name == "deployment-2") | has("filterResult")`, `true`}, - }, - }, - { - "binding context with group", - func() []BindingContext { - bcs := []BindingContext{} - - bc := BindingContext{ - Binding: "monitor_pods", - Type: TypeEvent, - WatchEvent: WatchEventAdded, - Objects: []ObjectAndFilterResult{}, - Snapshots: map[string][]ObjectAndFilterResult{}, - } - bc.Metadata.BindingType = OnKubernetesEvent - bc.Metadata.Group = "pods" - bc.Metadata.IncludeSnapshots = []string{"monitor_pods"} - - obj := ObjectAndFilterResult{ - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "pod-qwe", - }, - "kind": "Pod", - }, - }, - } - bc.Objects = append(bc.Objects, obj) - bc.Snapshots["monitor_pods"] = append(bc.Snapshots["monitor_pods"], obj) - bcs = append(bcs, bc) - - bc = BindingContext{ - Binding: "monitor_pods", - Type: TypeEvent, - WatchEvent: WatchEventAdded, - Objects: []ObjectAndFilterResult{}, - } - bc.Metadata.BindingType = OnKubernetesEvent - - obj = ObjectAndFilterResult{ - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "pod-qwe", - }, - "kind": "Pod", - }, - }, - } - bc.Objects = append(bc.Objects, obj) - - bcs = append(bcs, bc) - return bcs - }, - func() { - g.Expect(bcList[0]).Should(HaveKey("binding")) - g.Expect(bcList[0]).Should(HaveKey("snapshots")) - g.Expect(bcList[0]).Should(HaveKey("type")) - g.Expect(bcList[0]).Should(HaveKey("groupName")) - g.Expect(bcList[0]).ShouldNot(HaveKey("objects")) - }, - [][]string{ - {`. | length`, `2`}, - - // grouped binding context contains binding, type, snapshots and groupName - {`.[0] | length`, `4`}, // Only 4 fields - {`.[0].snapshots | has("monitor_pods")`, `true`}, - {`.[0].snapshots."monitor_pods" | length`, `1`}, - {`.[0].binding`, `"monitor_pods"`}, - {`.[0].type`, `"Group"`}, - {`.[0].groupName`, `"pods"`}, - - // JSON dump should has only 4 fields: binding, type, watchEvent and object. 
- {`.[1] | length`, `4`}, - {`.[1].binding`, `"monitor_pods"`}, - {`.[1].type`, `"Event"`}, - {`.[1].watchEvent`, `"Added"`}, - {`.[1].object.metadata.namespace`, `"default"`}, - {`.[1].object.metadata.name`, `"pod-qwe"`}, - }, - }, - { - "grouped Synchronization", - func() []BindingContext { - bcs := []BindingContext{} - - bc := BindingContext{ - Binding: "monitor_config_maps", - Type: TypeSynchronization, - Objects: []ObjectAndFilterResult{}, - Snapshots: map[string][]ObjectAndFilterResult{}, - } - bc.Metadata.BindingType = OnKubernetesEvent - bc.Metadata.Group = "pods" - bc.Metadata.IncludeSnapshots = []string{"monitor_config_maps"} - // object without jqfilter should not have filterResult field - obj := ObjectAndFilterResult{ - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "pod-qwe", - }, - "kind": "Pod", - }, - }, - FilterResult: "asd", - } - bc.Objects = append(bc.Objects, obj) - bc.Snapshots["monitor_config_maps"] = append(bc.Snapshots["monitor_config_maps"], obj) - bcs = append(bcs, bc) - - bc = BindingContext{ - Binding: "monitor_pods", - Type: TypeSynchronization, - Objects: []ObjectAndFilterResult{}, - } - bc.Metadata.BindingType = OnKubernetesEvent - // object without jqfilter should not have filterResult field - obj = ObjectAndFilterResult{ - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "pod-qwe", - }, - "kind": "Pod", - }, - }, - FilterResult: "asd", - } - bc.Objects = append(bc.Objects, obj) - - bcs = append(bcs, bc) - return bcs - }, - func() { - g.Expect(bcList).Should(HaveLen(2)) - - g.Expect(bcList[0]).Should(HaveLen(4)) - g.Expect(bcList[0]).Should(HaveKey("binding")) - g.Expect(bcList[0]["binding"]).Should(Equal("monitor_config_maps")) - g.Expect(bcList[0]).Should(HaveKey("type")) - g.Expect(bcList[0]["type"]).Should(Equal("Group")) - g.Expect(bcList[0]["groupName"]).Should(Equal("pods")) - g.Expect(bcList[0]).Should(HaveKey("snapshots")) - g.Expect(bcList[0]["snapshots"]).Should(HaveLen(1)) - - g.Expect(bcList[1]).Should(HaveLen(3)) - g.Expect(bcList[1]).Should(HaveKey("binding")) - g.Expect(bcList[1]).Should(HaveKey("type")) - g.Expect(bcList[1]["type"]).Should(Equal(TypeSynchronization)) - g.Expect(bcList[1]).Should(HaveKey("objects")) - g.Expect(bcList[1]["objects"]).Should(HaveLen(1)) - }, - [][]string{ - {`. | length`, `2`}, - - // grouped binding context contains binging, type, snapshots and groupName - {`.[0] | length`, `4`}, // Only 4 fields - {`.[0].binding`, `"monitor_config_maps"`}, - {`.[0].type`, `"Group"`}, - {`.[0].snapshots | has("monitor_config_maps")`, `true`}, - {`.[0].snapshots."monitor_config_maps" | length`, `1`}, - - // usual Synchronization has 3 fields: binding, type and objects. 
- {`.[1] | length`, `3`}, - {`.[1].binding`, `"monitor_pods"`}, - {`.[1].type`, `"Synchronization"`}, - {`.[1].objects | length`, `1`}, - {`.[1].objects[0].object.metadata.namespace`, `"default"`}, - {`.[1].objects[0].object.metadata.name`, `"pod-qwe"`}, - }, - }, - { - "kubernetes Synchronization with empty objects", - func() []BindingContext { - bc := BindingContext{ - Binding: "kubernetes", - Type: TypeSynchronization, - Objects: []ObjectAndFilterResult{}, - } - bc.Metadata.BindingType = OnKubernetesEvent - return []BindingContext{bc} - }, - func() { - assert.Len(t, bcList[0]["objects"], 0) - assert.Equal(t, TypeSynchronization, bcList[0]["type"]) - assert.Equal(t, "kubernetes", bcList[0]["binding"]) - assert.NotContains(t, bcList[0], "object") - assert.NotContains(t, bcList[0], "filterResult") - }, - [][]string{ - // JSON dump should contains n fields: binding, type and objects - {`.[0] | length`, `3`}, - {`.[0].binding`, `"kubernetes"`}, - {`.[0].type`, `"Synchronization"`}, - // objects should be an empty array - {`.[0] | has("objects")`, `true`}, - {`.[0].objects | length`, `0`}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - bcList = ConvertBindingContextList("v1", tt.bc()) - // assert.Len(t, bcList, 1) - - var err error - bcJson, err = bcList.Json() - assert.NoError(t, err) - - for _, jqAssertion := range tt.jqAssertions { - JqEqual(t, bcJson, jqAssertion[0], jqAssertion[1]) - } - - tt.fn() - }) - } -} - -// Test conversion of BindingContext for v1, also test json marshal of v1 binding contexts. -func Test_ConvertBindingContextList_v0(t *testing.T) { - var bcList BindingContextList - var bcJson []byte - - tests := []struct { - name string - bc func() BindingContext - fn func() - jqAsertions [][]string - }{ - { - "OnStartup binding", - func() BindingContext { - bc := BindingContext{ - Binding: "onStartup", - } - bc.Metadata.BindingType = OnStartup - return bc - }, - func() { - assert.Equal(t, "onStartup", bcList[0]["binding"]) - }, - [][]string{ - {`.[0].binding`, `"onStartup"`}, - {`.[0] | length`, `1`}, - }, - }, - { - "onKubernetesEvent binding", - func() BindingContext { - bc := BindingContext{ - Binding: "onKubernetesEvent", - Type: "Event", - WatchEvent: WatchEventAdded, - Objects: []ObjectAndFilterResult{ - { - Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "default", - "name": "pod-qwe", - }, - "kind": "Pod", - }, - }, - }, - }, - } - bc.Metadata.BindingType = OnKubernetesEvent - return bc - }, - - func() { - assert.Len(t, bcList[0], 5) - assert.Equal(t, "onKubernetesEvent", bcList[0]["binding"]) - assert.Equal(t, "add", bcList[0]["resourceEvent"]) - assert.Equal(t, "default", bcList[0]["resourceNamespace"]) - assert.Equal(t, "Pod", bcList[0]["resourceKind"]) - assert.Equal(t, "pod-qwe", bcList[0]["resourceName"]) - }, - [][]string{ - // JSON dump should has only 4 fields: binding, type, watchEvent and object. 
- {`.[0] | length`, `5`}, - {`.[0].binding`, `"onKubernetesEvent"`}, - {`.[0].resourceEvent`, `"add"`}, - {`.[0].resourceNamespace`, `"default"`}, - {`.[0].resourceKind`, `"Pod"`}, - {`.[0].resourceName`, `"pod-qwe"`}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - bcList = ConvertBindingContextList("v0", []BindingContext{tt.bc()}) - assert.Len(t, bcList, 1) - - var err error - bcJson, err = bcList.Json() - assert.NoError(t, err) - - for _, jqAssertion := range tt.jqAsertions { - JqEqual(t, bcJson, jqAssertion[0], jqAssertion[1]) - } - - tt.fn() - }) - } -} +// TODO: need refactoring +// change JQ tests for another testing tool + +// import ( +// "testing" + +// . "github.com/onsi/gomega" +// "github.com/stretchr/testify/assert" +// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + +// . "github.com/flant/libjq-go" +// . "github.com/flant/shell-operator/pkg/hook/types" +// . "github.com/flant/shell-operator/pkg/kube_events_manager/types" +// ) + +// func JqEqual(t *testing.T, input []byte, program string, expected string) { +// //nolint:typecheck // Ignore false positive: undeclared name: `Jq`. +// res, err := Jq().Program(program).Run(string(input)) +// if assert.NoError(t, err) { +// assert.Equal(t, expected, res, "jq: '%s', json was '%s'", program, string(input)) +// } +// } + +// // Test conversion of BindingContext for v1, also test json marshal of v1 binding contexts. +// func Test_ConvertBindingContextList_v1(t *testing.T) { +// g := NewWithT(t) + +// var bcList BindingContextList +// var bcJson []byte + +// tests := []struct { +// name string +// bc func() []BindingContext +// fn func() +// jqAssertions [][]string +// }{ +// { +// "OnStartup binding", +// func() []BindingContext { +// bc := BindingContext{ +// Binding: "onStartup", +// } +// bc.Metadata.BindingType = OnStartup +// return []BindingContext{bc} +// }, +// func() { +// assert.Equal(t, "onStartup", bcList[0]["binding"]) +// }, +// [][]string{ +// {`.[0].binding`, `"onStartup"`}, +// {`.[0] | length`, `1`}, +// }, +// }, +// { +// "kubernetes Event binding", +// func() []BindingContext { +// bc := BindingContext{ +// Binding: "kubernetes", +// Type: TypeEvent, +// WatchEvent: WatchEventAdded, +// Objects: []ObjectAndFilterResult{ +// { +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "pod-qwe", +// }, +// "kind": "Pod", +// }, +// }, +// }, +// }, +// } +// bc.Metadata.BindingType = OnKubernetesEvent +// return []BindingContext{bc} +// }, + +// func() { +// assert.Len(t, bcList[0], 4) +// assert.Equal(t, "kubernetes", bcList[0]["binding"]) +// assert.Equal(t, TypeEvent, bcList[0]["type"]) +// assert.Equal(t, "Added", bcList[0]["watchEvent"]) +// assert.IsType(t, &unstructured.Unstructured{}, bcList[0]["object"]) +// assert.Contains(t, bcList[0]["object"].(*unstructured.Unstructured).Object, "metadata") +// }, +// [][]string{ +// // JSON dump should has only 4 fields: binding, type, watchEvent and object. 
+// {`.[0] | length`, `4`}, +// {`.[0].binding`, `"kubernetes"`}, +// {`.[0].type`, `"Event"`}, +// {`.[0].watchEvent`, `"Added"`}, +// {`.[0].object.metadata.namespace`, `"default"`}, +// {`.[0].object.metadata.name`, `"pod-qwe"`}, +// }, +// }, +// { +// "kubernetes Synchronization event", +// func() []BindingContext { +// bc := BindingContext{ +// Binding: "kubernetes", +// Type: TypeSynchronization, +// Objects: []ObjectAndFilterResult{}, +// } +// bc.Metadata.BindingType = OnKubernetesEvent + +// obj := ObjectAndFilterResult{ +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "pod-qwe", +// }, +// "kind": "Pod", +// }, +// }, +// } +// obj.Metadata.JqFilter = "" +// bc.Objects = append(bc.Objects, obj) + +// // object with jqfilter should have filterResult field +// obj = ObjectAndFilterResult{ +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "deployment-test", +// }, +// "kind": "Deployment", +// }, +// }, +// FilterResult: `{"labels":{"label-name":"label-value"}}`, +// } +// obj.Metadata.JqFilter = ".metadata.labels" +// bc.Objects = append(bc.Objects, obj) + +// // object with jqfilter and with empty result should have filterResult field +// obj = ObjectAndFilterResult{ +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "deployment-2", +// }, +// "kind": "Deployment", +// }, +// }, +// FilterResult: `""`, +// } +// obj.Metadata.JqFilter = ".metadata.labels" +// bc.Objects = append(bc.Objects, obj) + +// return []BindingContext{bc} +// }, +// func() { +// assert.Len(t, bcList[0], 3) +// assert.Len(t, bcList[0]["objects"], 3) +// assert.Equal(t, TypeSynchronization, bcList[0]["type"]) +// assert.Equal(t, "kubernetes", bcList[0]["binding"]) +// assert.NotContains(t, bcList[0], "object") +// assert.NotContains(t, bcList[0], "filterResult") +// }, +// [][]string{ +// // JSON dump should contains n fields: binding, type and objects +// {`.[0] | length`, `3`}, +// {`.[0].binding`, `"kubernetes"`}, +// {`.[0].type`, `"Synchronization"`}, + +// {`.[0].objects[] | select(.object.metadata.name == "pod-qwe") | has("filterResult")`, `false`}, +// {`.[0].objects[] | select(.object.metadata.name == "deployment-test") | has("filterResult")`, `true`}, +// {`.[0].objects[] | select(.object.metadata.name == "deployment-test") | .filterResult.labels."label-name"`, `"label-value"`}, +// {`.[0].objects[] | select(.object.metadata.name == "deployment-2") | has("filterResult")`, `true`}, +// }, +// }, +// { +// "binding context with group", +// func() []BindingContext { +// bcs := []BindingContext{} + +// bc := BindingContext{ +// Binding: "monitor_pods", +// Type: TypeEvent, +// WatchEvent: WatchEventAdded, +// Objects: []ObjectAndFilterResult{}, +// Snapshots: map[string][]ObjectAndFilterResult{}, +// } +// bc.Metadata.BindingType = OnKubernetesEvent +// bc.Metadata.Group = "pods" +// bc.Metadata.IncludeSnapshots = []string{"monitor_pods"} + +// obj := ObjectAndFilterResult{ +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "pod-qwe", +// }, +// "kind": "Pod", +// }, +// }, +// } +// bc.Objects = append(bc.Objects, obj) +// bc.Snapshots["monitor_pods"] = 
append(bc.Snapshots["monitor_pods"], obj) +// bcs = append(bcs, bc) + +// bc = BindingContext{ +// Binding: "monitor_pods", +// Type: TypeEvent, +// WatchEvent: WatchEventAdded, +// Objects: []ObjectAndFilterResult{}, +// } +// bc.Metadata.BindingType = OnKubernetesEvent + +// obj = ObjectAndFilterResult{ +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "pod-qwe", +// }, +// "kind": "Pod", +// }, +// }, +// } +// bc.Objects = append(bc.Objects, obj) + +// bcs = append(bcs, bc) +// return bcs +// }, +// func() { +// g.Expect(bcList[0]).Should(HaveKey("binding")) +// g.Expect(bcList[0]).Should(HaveKey("snapshots")) +// g.Expect(bcList[0]).Should(HaveKey("type")) +// g.Expect(bcList[0]).Should(HaveKey("groupName")) +// g.Expect(bcList[0]).ShouldNot(HaveKey("objects")) +// }, +// [][]string{ +// {`. | length`, `2`}, + +// // grouped binding context contains binding, type, snapshots and groupName +// {`.[0] | length`, `4`}, // Only 4 fields +// {`.[0].snapshots | has("monitor_pods")`, `true`}, +// {`.[0].snapshots."monitor_pods" | length`, `1`}, +// {`.[0].binding`, `"monitor_pods"`}, +// {`.[0].type`, `"Group"`}, +// {`.[0].groupName`, `"pods"`}, + +// // JSON dump should has only 4 fields: binding, type, watchEvent and object. +// {`.[1] | length`, `4`}, +// {`.[1].binding`, `"monitor_pods"`}, +// {`.[1].type`, `"Event"`}, +// {`.[1].watchEvent`, `"Added"`}, +// {`.[1].object.metadata.namespace`, `"default"`}, +// {`.[1].object.metadata.name`, `"pod-qwe"`}, +// }, +// }, +// { +// "grouped Synchronization", +// func() []BindingContext { +// bcs := []BindingContext{} + +// bc := BindingContext{ +// Binding: "monitor_config_maps", +// Type: TypeSynchronization, +// Objects: []ObjectAndFilterResult{}, +// Snapshots: map[string][]ObjectAndFilterResult{}, +// } +// bc.Metadata.BindingType = OnKubernetesEvent +// bc.Metadata.Group = "pods" +// bc.Metadata.IncludeSnapshots = []string{"monitor_config_maps"} +// // object without jqfilter should not have filterResult field +// obj := ObjectAndFilterResult{ +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "pod-qwe", +// }, +// "kind": "Pod", +// }, +// }, +// FilterResult: "asd", +// } +// bc.Objects = append(bc.Objects, obj) +// bc.Snapshots["monitor_config_maps"] = append(bc.Snapshots["monitor_config_maps"], obj) +// bcs = append(bcs, bc) + +// bc = BindingContext{ +// Binding: "monitor_pods", +// Type: TypeSynchronization, +// Objects: []ObjectAndFilterResult{}, +// } +// bc.Metadata.BindingType = OnKubernetesEvent +// // object without jqfilter should not have filterResult field +// obj = ObjectAndFilterResult{ +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "pod-qwe", +// }, +// "kind": "Pod", +// }, +// }, +// FilterResult: "asd", +// } +// bc.Objects = append(bc.Objects, obj) + +// bcs = append(bcs, bc) +// return bcs +// }, +// func() { +// g.Expect(bcList).Should(HaveLen(2)) + +// g.Expect(bcList[0]).Should(HaveLen(4)) +// g.Expect(bcList[0]).Should(HaveKey("binding")) +// g.Expect(bcList[0]["binding"]).Should(Equal("monitor_config_maps")) +// g.Expect(bcList[0]).Should(HaveKey("type")) +// g.Expect(bcList[0]["type"]).Should(Equal("Group")) +// g.Expect(bcList[0]["groupName"]).Should(Equal("pods")) +// 
g.Expect(bcList[0]).Should(HaveKey("snapshots")) +// g.Expect(bcList[0]["snapshots"]).Should(HaveLen(1)) + +// g.Expect(bcList[1]).Should(HaveLen(3)) +// g.Expect(bcList[1]).Should(HaveKey("binding")) +// g.Expect(bcList[1]).Should(HaveKey("type")) +// g.Expect(bcList[1]["type"]).Should(Equal(TypeSynchronization)) +// g.Expect(bcList[1]).Should(HaveKey("objects")) +// g.Expect(bcList[1]["objects"]).Should(HaveLen(1)) +// }, +// [][]string{ +// {`. | length`, `2`}, + +// // grouped binding context contains binging, type, snapshots and groupName +// {`.[0] | length`, `4`}, // Only 4 fields +// {`.[0].binding`, `"monitor_config_maps"`}, +// {`.[0].type`, `"Group"`}, +// {`.[0].snapshots | has("monitor_config_maps")`, `true`}, +// {`.[0].snapshots."monitor_config_maps" | length`, `1`}, + +// // usual Synchronization has 3 fields: binding, type and objects. +// {`.[1] | length`, `3`}, +// {`.[1].binding`, `"monitor_pods"`}, +// {`.[1].type`, `"Synchronization"`}, +// {`.[1].objects | length`, `1`}, +// {`.[1].objects[0].object.metadata.namespace`, `"default"`}, +// {`.[1].objects[0].object.metadata.name`, `"pod-qwe"`}, +// }, +// }, +// { +// "kubernetes Synchronization with empty objects", +// func() []BindingContext { +// bc := BindingContext{ +// Binding: "kubernetes", +// Type: TypeSynchronization, +// Objects: []ObjectAndFilterResult{}, +// } +// bc.Metadata.BindingType = OnKubernetesEvent +// return []BindingContext{bc} +// }, +// func() { +// assert.Len(t, bcList[0]["objects"], 0) +// assert.Equal(t, TypeSynchronization, bcList[0]["type"]) +// assert.Equal(t, "kubernetes", bcList[0]["binding"]) +// assert.NotContains(t, bcList[0], "object") +// assert.NotContains(t, bcList[0], "filterResult") +// }, +// [][]string{ +// // JSON dump should contains n fields: binding, type and objects +// {`.[0] | length`, `3`}, +// {`.[0].binding`, `"kubernetes"`}, +// {`.[0].type`, `"Synchronization"`}, +// // objects should be an empty array +// {`.[0] | has("objects")`, `true`}, +// {`.[0].objects | length`, `0`}, +// }, +// }, +// } + +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// bcList = ConvertBindingContextList("v1", tt.bc()) +// // assert.Len(t, bcList, 1) + +// var err error +// bcJson, err = bcList.Json() +// assert.NoError(t, err) + +// for _, jqAssertion := range tt.jqAssertions { +// JqEqual(t, bcJson, jqAssertion[0], jqAssertion[1]) +// } + +// tt.fn() +// }) +// } +// } + +// // Test conversion of BindingContext for v1, also test json marshal of v1 binding contexts. 
+// func Test_ConvertBindingContextList_v0(t *testing.T) { +// var bcList BindingContextList +// var bcJson []byte + +// tests := []struct { +// name string +// bc func() BindingContext +// fn func() +// jqAsertions [][]string +// }{ +// { +// "OnStartup binding", +// func() BindingContext { +// bc := BindingContext{ +// Binding: "onStartup", +// } +// bc.Metadata.BindingType = OnStartup +// return bc +// }, +// func() { +// assert.Equal(t, "onStartup", bcList[0]["binding"]) +// }, +// [][]string{ +// {`.[0].binding`, `"onStartup"`}, +// {`.[0] | length`, `1`}, +// }, +// }, +// { +// "onKubernetesEvent binding", +// func() BindingContext { +// bc := BindingContext{ +// Binding: "onKubernetesEvent", +// Type: "Event", +// WatchEvent: WatchEventAdded, +// Objects: []ObjectAndFilterResult{ +// { +// Object: &unstructured.Unstructured{ +// Object: map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "namespace": "default", +// "name": "pod-qwe", +// }, +// "kind": "Pod", +// }, +// }, +// }, +// }, +// } +// bc.Metadata.BindingType = OnKubernetesEvent +// return bc +// }, + +// func() { +// assert.Len(t, bcList[0], 5) +// assert.Equal(t, "onKubernetesEvent", bcList[0]["binding"]) +// assert.Equal(t, "add", bcList[0]["resourceEvent"]) +// assert.Equal(t, "default", bcList[0]["resourceNamespace"]) +// assert.Equal(t, "Pod", bcList[0]["resourceKind"]) +// assert.Equal(t, "pod-qwe", bcList[0]["resourceName"]) +// }, +// [][]string{ +// // JSON dump should has only 4 fields: binding, type, watchEvent and object. +// {`.[0] | length`, `5`}, +// {`.[0].binding`, `"onKubernetesEvent"`}, +// {`.[0].resourceEvent`, `"add"`}, +// {`.[0].resourceNamespace`, `"default"`}, +// {`.[0].resourceKind`, `"Pod"`}, +// {`.[0].resourceName`, `"pod-qwe"`}, +// }, +// }, +// } + +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// bcList = ConvertBindingContextList("v0", []BindingContext{tt.bc()}) +// assert.Len(t, bcList, 1) + +// var err error +// bcJson, err = bcList.Json() +// assert.NoError(t, err) + +// for _, jqAssertion := range tt.jqAsertions { +// JqEqual(t, bcJson, jqAssertion[0], jqAssertion[1]) +// } + +// tt.fn() +// }) +// } +// } diff --git a/pkg/hook/config/config_test.go b/pkg/hook/config/config_test.go index d47c57a5..5d1b828a 100644 --- a/pkg/hook/config/config_test.go +++ b/pkg/hook/config/config_test.go @@ -775,7 +775,7 @@ settings: } for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + t.Run(test.name, func(_ *testing.T) { hookConfig = &HookConfig{} err = hookConfig.LoadAndValidate([]byte(test.jsonConfig)) test.testFn() @@ -1045,7 +1045,7 @@ func Test_HookConfig_V1_Kubernetes_Validate(t *testing.T) { } for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + t.Run(test.name, func(_ *testing.T) { hookConfig = &HookConfig{} err = hookConfig.LoadAndValidate([]byte(test.jsonText)) test.testFn() @@ -1083,7 +1083,7 @@ func Test_MergeArrays(t *testing.T) { } for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + t.Run(test.name, func(_ *testing.T) { res := MergeArrays(test.a1, test.a2) g.Expect(res).To(Equal(test.expect)) }) diff --git a/pkg/hook/config/validator_test.go b/pkg/hook/config/validator_test.go index 16a4f698..6bc3533f 100644 --- a/pkg/hook/config/validator_test.go +++ b/pkg/hook/config/validator_test.go @@ -144,7 +144,7 @@ func Test_Validate_V1_With_Error(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(_ *testing.T) { vu = prepareConfigObj(g, 
tt.configText) s := GetSchema(vu.Version) err = ValidateConfig(vu.Obj, s, "root") diff --git a/pkg/hook/config/versioned_untyped_test.go b/pkg/hook/config/versioned_untyped_test.go index 93971f60..aa250db3 100644 --- a/pkg/hook/config/versioned_untyped_test.go +++ b/pkg/hook/config/versioned_untyped_test.go @@ -136,7 +136,7 @@ schedule: } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(_ *testing.T) { vu = NewDefaultVersionedUntyped() err = vu.Load([]byte(tt.input)) diff --git a/pkg/hook/controller/admission_bindings_controller.go b/pkg/hook/controller/admission_bindings_controller.go index 90353def..cbeeec32 100644 --- a/pkg/hook/controller/admission_bindings_controller.go +++ b/pkg/hook/controller/admission_bindings_controller.go @@ -1,7 +1,7 @@ package controller import ( - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/api/admission/v1" . "github.com/flant/shell-operator/pkg/hook/binding_context" diff --git a/pkg/hook/controller/conversion_bindings_controller.go b/pkg/hook/controller/conversion_bindings_controller.go index 501f6742..4548e01e 100644 --- a/pkg/hook/controller/conversion_bindings_controller.go +++ b/pkg/hook/controller/conversion_bindings_controller.go @@ -1,7 +1,7 @@ package controller import ( - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" . "github.com/flant/shell-operator/pkg/hook/binding_context" @@ -69,7 +69,7 @@ func (c *ConversionBindingsController) DisableConversionBindings() { // TODO dynamic enable/disable conversion webhooks. } -func (c *ConversionBindingsController) CanHandleEvent(crdName string, request *v1.ConversionRequest, rule conversion.Rule) bool { +func (c *ConversionBindingsController) CanHandleEvent(crdName string, _ *v1.ConversionRequest, rule conversion.Rule) bool { _, has := c.Links[crdName] if !has { return false diff --git a/pkg/hook/controller/hook_controller.go b/pkg/hook/controller/hook_controller.go index cb4b740c..54cd166d 100644 --- a/pkg/hook/controller/hook_controller.go +++ b/pkg/hook/controller/hook_controller.go @@ -1,6 +1,7 @@ package controller import ( + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" . 
"github.com/flant/shell-operator/pkg/hook/binding_context" @@ -48,18 +49,21 @@ type HookController struct { validatingBindings []ValidatingConfig mutatingBindings []MutatingConfig conversionBindings []ConversionConfig + + logger *log.Logger } -func (hc *HookController) InitKubernetesBindings(bindings []OnKubernetesEventConfig, kubeEventMgr kube_events_manager.KubeEventsManager) { +func (hc *HookController) InitKubernetesBindings(bindings []OnKubernetesEventConfig, kubeEventMgr kube_events_manager.KubeEventsManager, logger *log.Logger) { if len(bindings) == 0 { return } - bindingCtrl := NewKubernetesBindingsController() + bindingCtrl := NewKubernetesBindingsController(logger) bindingCtrl.WithKubeEventsManager(kubeEventMgr) bindingCtrl.WithKubernetesBindings(bindings) hc.KubernetesController = bindingCtrl hc.kubernetesBindings = bindings + hc.logger = logger } func (hc *HookController) InitScheduleBindings(bindings []ScheduleConfig, scheduleMgr schedule_manager.ScheduleManager) { diff --git a/pkg/hook/controller/hook_controller_test.go b/pkg/hook/controller/hook_controller_test.go index 782aa295..92683f9f 100644 --- a/pkg/hook/controller/hook_controller_test.go +++ b/pkg/hook/controller/hook_controller_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" "github.com/flant/kube-client/fake" @@ -19,7 +20,7 @@ func Test_UpdateSnapshots(t *testing.T) { g := NewWithT(t) fc := fake.NewFakeCluster(fake.ClusterVersionV121) - mgr := kube_events_manager.NewKubeEventsManager(context.Background(), fc.Client) + mgr := kube_events_manager.NewKubeEventsManager(context.Background(), fc.Client, log.NewNop()) testHookConfig := ` configVersion: v1 @@ -46,7 +47,7 @@ kubernetes: g.Expect(err).ShouldNot(HaveOccurred()) hc := NewHookController() - hc.InitKubernetesBindings(testCfg.OnKubernetesEvents, mgr) + hc.InitKubernetesBindings(testCfg.OnKubernetesEvents, mgr, log.NewNop()) hc.EnableScheduleBindings() // Test case: combined binding context for binding_2 and binding_3. diff --git a/pkg/hook/controller/kubernetes_bindings_controller.go b/pkg/hook/controller/kubernetes_bindings_controller.go index 080f7119..dd395d31 100644 --- a/pkg/hook/controller/kubernetes_bindings_controller.go +++ b/pkg/hook/controller/kubernetes_bindings_controller.go @@ -3,7 +3,7 @@ package controller import ( "fmt" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" . "github.com/flant/shell-operator/pkg/hook/binding_context" . "github.com/flant/shell-operator/pkg/hook/types" @@ -48,15 +48,18 @@ type kubernetesBindingsController struct { // dependencies kubeEventsManager kube_events_manager.KubeEventsManager + + logger *log.Logger } // kubernetesHooksController should implement the KubernetesHooksController var _ KubernetesBindingsController = &kubernetesBindingsController{} // NewKubernetesBindingsController returns an implementation of KubernetesBindingsController -var NewKubernetesBindingsController = func() *kubernetesBindingsController { +var NewKubernetesBindingsController = func(logger *log.Logger) *kubernetesBindingsController { return &kubernetesBindingsController{ BindingMonitorLinks: make(map[string]*KubernetesBindingToMonitorLink), + logger: logger, } } @@ -123,7 +126,7 @@ func (c *kubernetesBindingsController) UpdateMonitor(monitorId string, kind, api return fmt.Errorf("recreate monitor for binding '%s': %v", bindingName, err) } - log.WithFields(utils.LabelsToLogFields(link.BindingConfig.Monitor.Metadata.LogLabels)). 
+ utils.EnrichLoggerWithLabels(c.logger, link.BindingConfig.Monitor.Metadata.LogLabels). Infof("Monitor for '%s' is recreated with new kind=%s and apiVersion=%s", link.BindingConfig.BindingName, link.BindingConfig.Monitor.Kind, link.BindingConfig.Monitor.ApiVersion) diff --git a/pkg/hook/hook.go b/pkg/hook/hook.go index 34ce5486..803c55b1 100644 --- a/pkg/hook/hook.go +++ b/pkg/hook/hook.go @@ -8,6 +8,7 @@ import ( "path/filepath" "strings" + "github.com/deckhouse/deckhouse/pkg/log" uuid "github.com/gofrs/uuid/v5" "github.com/kennygrant/sanitize" "golang.org/x/time/rate" @@ -44,13 +45,16 @@ type Hook struct { RateLimiter *rate.Limiter TmpDir string + + Logger *log.Logger } -func NewHook(name, path string) *Hook { +func NewHook(name, path string, logger *log.Logger) *Hook { return &Hook{ Name: name, Path: path, Config: &config.HookConfig{}, + Logger: logger, } } @@ -138,7 +142,7 @@ func (h *Hook) Run(_ BindingType, context []BindingContext, logLabels map[string result := &Result{} - result.Usage, err = executor.RunAndLogLines(hookCmd, logLabels) + result.Usage, err = executor.RunAndLogLines(hookCmd, logLabels, h.Logger) if err != nil { return result, fmt.Errorf("%s FAILED: %s", h.Name, err) } diff --git a/pkg/hook/hook_manager.go b/pkg/hook/hook_manager.go index 3b5ca603..5d31c725 100644 --- a/pkg/hook/hook_manager.go +++ b/pkg/hook/hook_manager.go @@ -8,7 +8,7 @@ import ( "sort" "strings" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "github.com/flant/shell-operator/pkg/executor" @@ -41,6 +41,8 @@ type Manager struct { // Index crdName -> fromVersion -> conversionLink conversionChains *conversion.ChainStorage + + logger *log.Logger } // ManagerConfig sets configuration for Manager @@ -51,6 +53,8 @@ type ManagerConfig struct { Smgr schedule_manager.ScheduleManager Wmgr *admission.WebhookManager Cmgr *conversion.WebhookManager + + Logger *log.Logger } func NewHookManager(config *ManagerConfig) *Manager { @@ -66,6 +70,8 @@ func NewHookManager(config *ManagerConfig) *Manager { scheduleManager: config.Smgr, admissionWebhookManager: config.Wmgr, conversionWebhookManager: config.Cmgr, + + logger: config.Logger, } } @@ -126,10 +132,10 @@ func (hm *Manager) loadHook(hookPath string) (hook *Hook, err error) { if err != nil { return nil, err } - hook = NewHook(hookName, hookPath) + hook = NewHook(hookName, hookPath, hm.logger.Named("hook")) - hookEntry := log.WithField("hook", hook.Name). - WithField("phase", "config") + hookEntry := hm.logger.With("hook", hook.Name). + With("phase", "config") hookEntry.Infof("Load config from '%s'", hookPath) @@ -183,7 +189,7 @@ func (hm *Manager) loadHook(hookPath string) (hook *Hook, err error) { } hookCtrl := controller.NewHookController() - hookCtrl.InitKubernetesBindings(hook.GetConfig().OnKubernetesEvents, hm.kubeEventsManager) + hookCtrl.InitKubernetesBindings(hook.GetConfig().OnKubernetesEvents, hm.kubeEventsManager, hm.logger.Named("kubernetes-bindings")) hookCtrl.InitScheduleBindings(hook.GetConfig().Schedules, hm.scheduleManager) hookCtrl.InitConversionBindings(hook.GetConfig().KubernetesConversion, hm.conversionWebhookManager) hookCtrl.InitAdmissionBindings(hook.GetConfig().KubernetesValidating, hook.GetConfig().KubernetesMutating, hm.admissionWebhookManager) @@ -208,8 +214,8 @@ func (hm *Manager) execCommandOutput(hookName string, dir string, entrypoint str cmd.Stdout = nil cmd.Stderr = nil - debugEntry := log.WithField("hook", hookName). 
- WithField("cmd", strings.Join(cmd.Args, " ")) + debugEntry := hm.logger.With("hook", hookName). + With("cmd", strings.Join(cmd.Args, " ")) debugEntry.Debugf("Executing hook in %s", cmd.Dir) diff --git a/pkg/hook/hook_manager_test.go b/pkg/hook/hook_manager_test.go index fea7863a..f98d7a73 100644 --- a/pkg/hook/hook_manager_test.go +++ b/pkg/hook/hook_manager_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" "github.com/flant/shell-operator/pkg/app" @@ -30,6 +31,8 @@ func newHookManager(t *testing.T, testdataDir string) *Manager { Smgr: nil, Wmgr: admissionManager, Cmgr: conversionManager, + + Logger: log.NewNop(), } hm := NewHookManager(cfg) diff --git a/pkg/hook/hook_test.go b/pkg/hook/hook_test.go index d5f6cf82..b7c28cf8 100644 --- a/pkg/hook/hook_test.go +++ b/pkg/hook/hook_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" "golang.org/x/time/rate" @@ -23,7 +24,7 @@ func Test_Hook_SafeName(t *testing.T) { t.Error(err) } - h := NewHook(hookName, hookPath) + h := NewHook(hookName, hookPath, log.NewNop()) g.Expect(h.SafeName()).To(Equal("002-cool-hooks-monitor-namespaces-py")) } @@ -86,7 +87,7 @@ func Test_CreateLimiter(t *testing.T) { } for _, c := range cases { - t.Run(c.title, func(t *testing.T) { + t.Run(c.title, func(_ *testing.T) { cfg := &config.HookConfig{ Settings: c.settings, } @@ -134,8 +135,8 @@ func Test_Hook_WithConfig(t *testing.T) { } for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - hook = NewHook("hook-sh", "/hooks/hook.sh") + t.Run(test.name, func(_ *testing.T) { + hook = NewHook("hook-sh", "/hooks/hook.sh", log.NewNop()) _, err = hook.LoadConfig([]byte(test.jsonData)) test.fn() }) diff --git a/pkg/hook/task_metadata/task_metadata.go b/pkg/hook/task_metadata/task_metadata.go index 4287949a..0b0a3b38 100644 --- a/pkg/hook/task_metadata/task_metadata.go +++ b/pkg/hook/task_metadata/task_metadata.go @@ -3,7 +3,7 @@ package task_metadata import ( "fmt" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/shell-operator/pkg/hook/binding_context" "github.com/flant/shell-operator/pkg/hook/types" diff --git a/pkg/kube/object_patch/operation.go b/pkg/kube/object_patch/operation.go index dd69192b..e58ea968 100644 --- a/pkg/kube/object_patch/operation.go +++ b/pkg/kube/object_patch/operation.go @@ -3,8 +3,8 @@ package object_patch import ( "fmt" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/hashicorp/go-multierror" - log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" diff --git a/pkg/kube/object_patch/patch.go b/pkg/kube/object_patch/patch.go index 36a5cfea..f46e22b7 100644 --- a/pkg/kube/object_patch/patch.go +++ b/pkg/kube/object_patch/patch.go @@ -6,9 +6,9 @@ import ( "fmt" "time" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/hashicorp/go-multierror" gerror "github.com/pkg/errors" - log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,7 +23,7 @@ import ( type ObjectPatcher struct { kubeClient KubeClient - logger *log.Entry + logger *log.Logger } type KubeClient interface { @@ -32,10 +32,10 @@ type KubeClient interface { GroupVersionResource(apiVersion string, kind string) (schema.GroupVersionResource, error) } -func 
NewObjectPatcher(kubeClient KubeClient) *ObjectPatcher { +func NewObjectPatcher(kubeClient KubeClient, logger *log.Logger) *ObjectPatcher { return &ObjectPatcher{ kubeClient: kubeClient, - logger: log.WithField("operator.component", "KubernetesObjectPatcher"), + logger: logger.With("operator.component", "KubernetesObjectPatcher"), } } diff --git a/pkg/kube/object_patch/patch_test.go b/pkg/kube/object_patch/patch_test.go index 25f49707..bc0a29ce 100644 --- a/pkg/kube/object_patch/patch_test.go +++ b/pkg/kube/object_patch/patch_test.go @@ -6,6 +6,7 @@ import ( "os" "testing" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -524,7 +525,7 @@ object: cluster := newFakeClusterWithNamespaceAndObjects(t, namespace, configMap) // Apply MergePatch: add a new field in data section. - patcher := NewObjectPatcher(cluster.Client) + patcher := NewObjectPatcher(cluster.Client, log.NewNop()) err := tt.fn(patcher) @@ -775,7 +776,7 @@ object: | cluster := newFakeClusterWithNamespaceAndObjects(t, namespace, existingConfigMap) // Apply MergePatch: add a new field in data section. - patcher := NewObjectPatcher(cluster.Client) + patcher := NewObjectPatcher(cluster.Client, log.NewNop()) err := tt.fn(patcher) @@ -883,7 +884,7 @@ name: %s cluster := newFakeClusterWithNamespaceAndObjects(t, namespace, existingConfigMap) // Apply MergePatch: add a new field in data section. - patcher := NewObjectPatcher(cluster.Client) + patcher := NewObjectPatcher(cluster.Client, log.NewNop()) err := tt.fn(patcher) diff --git a/pkg/kube_events_manager/error_handler.go b/pkg/kube_events_manager/error_handler.go index 8ae8d564..852b789e 100644 --- a/pkg/kube_events_manager/error_handler.go +++ b/pkg/kube_events_manager/error_handler.go @@ -3,7 +3,7 @@ package kube_events_manager import ( "io" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/cache" @@ -14,16 +14,18 @@ import ( type WatchErrorHandler struct { description string kind string - logEntry *log.Entry metricStorage *metric_storage.MetricStorage + + logger *log.Logger } -func newWatchErrorHandler(description string, kind string, logLabels map[string]string, metricStorage *metric_storage.MetricStorage) *WatchErrorHandler { +func newWatchErrorHandler(description string, kind string, logLabels map[string]string, metricStorage *metric_storage.MetricStorage, logger *log.Logger) *WatchErrorHandler { return &WatchErrorHandler{ description: description, kind: kind, - logEntry: log.WithFields(utils.LabelsToLogFields(logLabels)), metricStorage: metricStorage, + + logger: utils.EnrichLoggerWithLabels(logger, logLabels), } } @@ -36,16 +38,16 @@ func (weh *WatchErrorHandler) handler(_ *cache.Reflector, err error) { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
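Editor's aside (illustrative only, not part of the diff): the hunks above and below replace stored logrus *log.Entry fields with an injected *log.Logger that is pre-enriched with the binding's log labels. The sketch below uses only the deckhouse log calls that appear in this patch (NewLogger, Named, With, Errorf); the watcher type is invented, and the enrich helper is merely an assumption about what utils.EnrichLoggerWithLabels does (fold the label map into With fields).

package main

import "github.com/deckhouse/deckhouse/pkg/log"

// enrich approximates what utils.EnrichLoggerWithLabels is assumed to do:
// fold every log label into the logger as a structured field.
func enrich(logger *log.Logger, labels map[string]string) *log.Logger {
	for k, v := range labels {
		logger = logger.With(k, v)
	}
	return logger
}

// watchErrorReporter is a hypothetical stand-in for the reworked WatchErrorHandler:
// it keeps a pre-enriched logger instead of building a logrus entry on every call.
type watchErrorReporter struct {
	description string
	kind        string
	logger      *log.Logger
}

func newWatchErrorReporter(description, kind string, labels map[string]string, logger *log.Logger) *watchErrorReporter {
	return &watchErrorReporter{
		description: description,
		kind:        kind,
		logger:      enrich(logger, labels),
	}
}

func (r *watchErrorReporter) report(err error) {
	r.logger.Errorf("%s: Failed to watch %v: %v", r.description, r.kind, err)
}

func main() {
	root := log.NewLogger(log.Options{})
	h := newWatchErrorReporter("monitor pods", "Pod", map[string]string{"binding": "kubernetes"}, root.Named("watch-error-handler"))
	h.report(nil)
}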
- weh.logEntry.Errorf("%s: Watch of %v closed with: %v", weh.description, weh.kind, err) + weh.logger.Errorf("%s: Watch of %v closed with: %v", weh.description, weh.kind, err) errorType = "expired" case err == io.EOF: // watch closed normally errorType = "eof" case err == io.ErrUnexpectedEOF: - weh.logEntry.Errorf("%s: Watch for %v closed with unexpected EOF: %v", weh.description, weh.kind, err) + weh.logger.Errorf("%s: Watch for %v closed with unexpected EOF: %v", weh.description, weh.kind, err) errorType = "unexpected-eof" case err != nil: - weh.logEntry.Errorf("%s: Failed to watch %v: %v", weh.description, weh.kind, err) + weh.logger.Errorf("%s: Failed to watch %v: %v", weh.description, weh.kind, err) errorType = "fail" } diff --git a/pkg/kube_events_manager/factory.go b/pkg/kube_events_manager/factory.go index 56147105..fc439025 100644 --- a/pkg/kube_events_manager/factory.go +++ b/pkg/kube_events_manager/factory.go @@ -5,7 +5,7 @@ import ( "sync" "time" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" diff --git a/pkg/kube_events_manager/kube_events_manager.go b/pkg/kube_events_manager/kube_events_manager.go index 9181cf12..cc8b583c 100644 --- a/pkg/kube_events_manager/kube_events_manager.go +++ b/pkg/kube_events_manager/kube_events_manager.go @@ -5,7 +5,7 @@ import ( "runtime/trace" "sync" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" klient "github.com/flant/kube-client/client" . "github.com/flant/shell-operator/pkg/kube_events_manager/types" @@ -37,13 +37,15 @@ type kubeEventsManager struct { m sync.RWMutex Monitors map[string]Monitor + + logger *log.Logger } // kubeEventsManager should implement KubeEventsManager. var _ KubeEventsManager = &kubeEventsManager{} // NewKubeEventsManager returns an implementation of KubeEventsManager. 
-func NewKubeEventsManager(ctx context.Context, client *klient.Client) *kubeEventsManager { +func NewKubeEventsManager(ctx context.Context, client *klient.Client, logger *log.Logger) *kubeEventsManager { cctx, cancel := context.WithCancel(ctx) em := &kubeEventsManager{ ctx: cctx, @@ -52,6 +54,7 @@ func NewKubeEventsManager(ctx context.Context, client *klient.Client) *kubeEvent m: sync.RWMutex{}, Monitors: make(map[string]Monitor), KubeEventCh: make(chan KubeEvent, 1), + logger: logger, } return em } @@ -73,7 +76,9 @@ func (mgr *kubeEventsManager) AddMonitor(monitorConfig *MonitorConfig) error { func(ev KubeEvent) { defer trace.StartRegion(context.Background(), "EmitKubeEvent").End() mgr.KubeEventCh <- ev - }) + }, + mgr.logger.Named("monitor"), + ) err := monitor.CreateInformers() if err != nil { diff --git a/pkg/kube_events_manager/kube_events_manager_test.go b/pkg/kube_events_manager/kube_events_manager_test.go index 6e4efd86..ed55b4d5 100644 --- a/pkg/kube_events_manager/kube_events_manager_test.go +++ b/pkg/kube_events_manager/kube_events_manager_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -49,7 +50,7 @@ func Test_MainKubeEventsManager_Run(t *testing.T) { }, } - mgr := NewKubeEventsManager(context.Background(), kubeClient) + mgr := NewKubeEventsManager(context.Background(), kubeClient, log.NewNop()) // monitor with 3 namespaces and 4 object names monitor := &MonitorConfig{ @@ -137,7 +138,7 @@ func Test_MainKubeEventsManager_HandleEvents(t *testing.T) { _, _ = dynClient.Resource(podGvr).Namespace("default").Create(context.TODO(), obj, metav1.CreateOptions{}, []string{}...) // Init() replacement - mgr := NewKubeEventsManager(ctx, kubeClient) + mgr := NewKubeEventsManager(ctx, kubeClient, log.NewNop()) mgr.KubeEventCh = make(chan KubeEvent, 10) // monitor with 3 namespaces and 4 object names and all event types @@ -313,7 +314,7 @@ func Test_FakeClient_CatchUpdates(t *testing.T) { _, _ = dynClient.Resource(podGvr).Namespace("default").Create(context.TODO(), obj, metav1.CreateOptions{}, []string{}...) //// Init() replacement - mgr := NewKubeEventsManager(ctx, nil) + mgr := NewKubeEventsManager(ctx, nil, log.NewNop()) // monitor with 3 namespaces and 4 object names and all event types monitor := &MonitorConfig{ diff --git a/pkg/kube_events_manager/monitor.go b/pkg/kube_events_manager/monitor.go index a4fc5bbd..1cbc4b65 100644 --- a/pkg/kube_events_manager/monitor.go +++ b/pkg/kube_events_manager/monitor.go @@ -5,7 +5,7 @@ import ( "fmt" "sort" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" klient "github.com/flant/kube-client/client" . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" @@ -46,9 +46,11 @@ type monitor struct { ctx context.Context cancel context.CancelFunc metricStorage *metric_storage.MetricStorage + + logger *log.Logger } -func NewMonitor(ctx context.Context, client *klient.Client, mstor *metric_storage.MetricStorage, config *MonitorConfig, eventCb func(KubeEvent)) *monitor { +func NewMonitor(ctx context.Context, client *klient.Client, mstor *metric_storage.MetricStorage, config *MonitorConfig, eventCb func(KubeEvent), logger *log.Logger) *monitor { cctx, cancel := context.WithCancel(ctx) return &monitor{ @@ -62,6 +64,7 @@ func NewMonitor(ctx context.Context, client *klient.Client, mstor *metric_storag VaryingInformers: make(map[string][]*resourceInformer), cancelForNs: make(map[string]context.CancelFunc), staticNamespaces: make(map[string]bool), + logger: logger, } } @@ -75,9 +78,8 @@ func (m *monitor) GetConfig() *MonitorConfig { // multiple informers are created for each namespace. // If no NamespaceSelector defined, then one informer is created. func (m *monitor) CreateInformers() error { - logEntry := log. - WithFields(utils.LabelsToLogFields(m.Config.Metadata.LogLabels)). - WithField("binding.name", m.Config.Metadata.DebugName) + logEntry := utils.EnrichLoggerWithLabels(m.logger, m.Config.Metadata.LogLabels). + With("binding.name", m.Config.Metadata.DebugName) if m.Config.Kind == "" && m.Config.ApiVersion == "" { logEntry.Debugf("Create Informers for Config with empty kind and apiVersion: %+v", m.Config) @@ -229,6 +231,7 @@ func (m *monitor) CreateInformersForNamespace(namespace string) (informers []*re mstor: m.metricStorage, eventCb: m.eventCb, monitor: m.Config, + logger: m.logger.Named("resource-informer"), } objNames := []string{""} diff --git a/pkg/kube_events_manager/monitor_config.go b/pkg/kube_events_manager/monitor_config.go index 5c33c957..08e87566 100644 --- a/pkg/kube_events_manager/monitor_config.go +++ b/pkg/kube_events_manager/monitor_config.go @@ -1,7 +1,7 @@ package kube_events_manager import ( - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -25,7 +25,7 @@ type MonitorConfig struct { LabelSelector *metav1.LabelSelector FieldSelector *FieldSelector JqFilter string - LogEntry *log.Entry + Logger *log.Logger Mode KubeEventMode KeepFullObjectsInMemory bool FilterFunc func(*unstructured.Unstructured) (interface{}, error) diff --git a/pkg/kube_events_manager/monitor_test.go b/pkg/kube_events_manager/monitor_test.go index 1059256a..b71d9a8b 100644 --- a/pkg/kube_events_manager/monitor_test.go +++ b/pkg/kube_events_manager/monitor_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,7 +39,7 @@ func Test_Monitor_should_handle_dynamic_ns_events(t *testing.T) { mon := NewMonitor(context.Background(), fc.Client, nil, monitorCfg, func(ev KubeEvent) { objsFromEvents = append(objsFromEvents, snapshotResourceIDs(ev.Objects)...) - }) + }, log.NewNop()) // Start monitor. 
err := mon.CreateInformers() diff --git a/pkg/kube_events_manager/namespace_informer.go b/pkg/kube_events_manager/namespace_informer.go index 533e83de..726cee94 100644 --- a/pkg/kube_events_manager/namespace_informer.go +++ b/pkg/kube_events_manager/namespace_informer.go @@ -6,7 +6,7 @@ import ( "context" "fmt" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -137,7 +137,7 @@ func (ni *namespaceInformer) start() { if err := wait.PollUntilContextCancel(cctx, DefaultSyncTime, true, func(_ context.Context) (bool, error) { return ni.SharedInformer.HasSynced(), nil }); err != nil { - ni.Monitor.LogEntry.Errorf("%s: cache is not synced for informer", ni.Monitor.Metadata.DebugName) + ni.Monitor.Logger.Errorf("%s: cache is not synced for informer", ni.Monitor.Metadata.DebugName) } log.Debugf("%s: informer is ready", ni.Monitor.Metadata.DebugName) diff --git a/pkg/kube_events_manager/resource_informer.go b/pkg/kube_events_manager/resource_informer.go index 4951b8bb..3f7d25c0 100644 --- a/pkg/kube_events_manager/resource_informer.go +++ b/pkg/kube_events_manager/resource_informer.go @@ -7,8 +7,8 @@ import ( "sync" "time" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/gofrs/uuid/v5" - log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -60,6 +60,8 @@ type resourceInformer struct { // a flag to stop handle events after Stop() stopped bool + + logger *log.Logger } // resourceInformer should implement ResourceInformer @@ -68,6 +70,8 @@ type resourceInformerConfig struct { mstor *metric_storage.MetricStorage eventCb func(KubeEvent) monitor *MonitorConfig + + logger *log.Logger } func newResourceInformer(ns, name string, cfg *resourceInformerConfig) *resourceInformer { @@ -84,6 +88,7 @@ func newResourceInformer(ns, name string, cfg *resourceInformerConfig) *resource eventBufLock: sync.Mutex{}, cachedObjectsInfo: &CachedObjectsInfo{}, cachedObjectsIncrement: &CachedObjectsInfo{}, + logger: cfg.logger, } return informer } @@ -440,10 +445,10 @@ func (ei *resourceInformer) start() { }() // TODO: separate handler and informer - errorHandler := newWatchErrorHandler(ei.Monitor.Metadata.DebugName, ei.Monitor.Kind, ei.Monitor.Metadata.LogLabels, ei.metricStorage) + errorHandler := newWatchErrorHandler(ei.Monitor.Metadata.DebugName, ei.Monitor.Kind, ei.Monitor.Metadata.LogLabels, ei.metricStorage, ei.logger.Named("watch-error-handler")) err := DefaultFactoryStore.Start(ei.ctx, ei.id, ei.KubeClient.Dynamic(), ei.FactoryIndex, ei, errorHandler) if err != nil { - ei.Monitor.LogEntry.Errorf("%s: cache is not synced for informer", ei.Monitor.Metadata.DebugName) + ei.Monitor.Logger.Errorf("%s: cache is not synced for informer", ei.Monitor.Metadata.DebugName) return } diff --git a/pkg/kube_events_manager/types/types.go b/pkg/kube_events_manager/types/types.go index 134eff5e..dabb7c16 100644 --- a/pkg/kube_events_manager/types/types.go +++ b/pkg/kube_events_manager/types/types.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) diff --git a/pkg/kube_events_manager/util.go b/pkg/kube_events_manager/util.go index f37869eb..b4459d2e 100644 --- a/pkg/kube_events_manager/util.go +++ 
b/pkg/kube_events_manager/util.go @@ -2,7 +2,7 @@ package kube_events_manager import ( "fmt" - "math/rand" + "math/rand/v2" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -59,9 +59,9 @@ const ( // randomizedResyncPeriod returns a time.Duration between 2 hours and 4 hours with jitter and granularity func randomizedResyncPeriod() time.Duration { spreadCount := ResyncPeriodSpread.Nanoseconds() / ResyncPeriodGranularity.Nanoseconds() - rndSpreadDelta := time.Duration(rand.Int63n(spreadCount)) * ResyncPeriodGranularity + rndSpreadDelta := time.Duration(rand.Int64N(spreadCount)) * ResyncPeriodGranularity jitterCount := ResyncPeriodGranularity.Nanoseconds() / ResyncPeriodJitterGranularity.Nanoseconds() - rndJitterDelta := time.Duration(rand.Int63n(jitterCount)) * ResyncPeriodJitterGranularity + rndJitterDelta := time.Duration(rand.Int64N(jitterCount)) * ResyncPeriodJitterGranularity return ResyncPeriodMedian - (ResyncPeriodSpread / 2) + rndSpreadDelta + rndJitterDelta } diff --git a/pkg/metric/storage.go b/pkg/metric/storage.go index 4cdca8b1..3bcf207b 100644 --- a/pkg/metric/storage.go +++ b/pkg/metric/storage.go @@ -1,8 +1,9 @@ package metric import ( - "github.com/flant/shell-operator/pkg/metric_storage/operation" "github.com/prometheus/client_golang/prometheus" + + "github.com/flant/shell-operator/pkg/metric_storage/operation" ) type Storage interface { diff --git a/pkg/metric_storage/metric_storage.go b/pkg/metric_storage/metric_storage.go index 6adf62c8..3c74fae2 100644 --- a/pkg/metric_storage/metric_storage.go +++ b/pkg/metric_storage/metric_storage.go @@ -3,15 +3,15 @@ package metric_storage import ( "context" "fmt" - "github.com/flant/shell-operator/pkg/metric" "net/http" "strings" "sync" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - log "github.com/sirupsen/logrus" + "github.com/flant/shell-operator/pkg/metric" "github.com/flant/shell-operator/pkg/metric_storage/operation" "github.com/flant/shell-operator/pkg/metric_storage/vault" . "github.com/flant/shell-operator/pkg/utils/labels" @@ -42,9 +42,11 @@ type MetricStorage struct { Registry *prometheus.Registry Gatherer prometheus.Gatherer Registerer prometheus.Registerer + + logger *log.Logger } -func NewMetricStorage(ctx context.Context, prefix string, newRegistry bool) *MetricStorage { +func NewMetricStorage(ctx context.Context, prefix string, newRegistry bool, logger *log.Logger) *MetricStorage { cctx, cancel := context.WithCancel(ctx) m := &MetricStorage{ ctx: cctx, @@ -57,6 +59,8 @@ func NewMetricStorage(ctx context.Context, prefix string, newRegistry bool) *Met HistogramBuckets: make(map[string][]float64), Gatherer: prometheus.DefaultGatherer, Registerer: prometheus.DefaultRegisterer, + + logger: logger.With("operator.component", "metricsStorage"), } m.groupedVault = vault.NewGroupedVault(m.resolveMetricName) m.groupedVault.SetRegisterer(m.Registerer) @@ -90,8 +94,7 @@ func (m *MetricStorage) GaugeSet(metric string, value float64, labels map[string } defer func() { if r := recover(); r != nil { - log.WithField("operator.component", "metricsStorage"). 
- Errorf("Metric gauge set %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) + m.logger.Errorf("Metric gauge set %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) } }() @@ -104,8 +107,7 @@ func (m *MetricStorage) GaugeAdd(metric string, value float64, labels map[string } defer func() { if r := recover(); r != nil { - log.WithField("operator.component", "metricsStorage"). - Errorf("Metric gauge add %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) + m.logger.Errorf("Metric gauge add %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) } }() @@ -130,8 +132,7 @@ func (m *MetricStorage) RegisterGauge(metric string, labels map[string]string) * defer func() { if r := recover(); r != nil { - log.WithField("operator.component", "metricStorage"). - Errorf("Create metric gauge %s %v with %v: %v", metricName, LabelNames(labels), labels, r) + m.logger.Errorf("Create metric gauge %s %v with %v: %v", metricName, LabelNames(labels), labels, r) } }() @@ -143,8 +144,7 @@ func (m *MetricStorage) RegisterGauge(metric string, labels map[string]string) * return vec } - log.WithField("operator.component", "metricStorage"). - Infof("Create metric gauge %s", metricName) + m.logger.Infof("Create metric gauge %s", metricName) vec = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: metricName, @@ -165,8 +165,7 @@ func (m *MetricStorage) CounterAdd(metric string, value float64, labels map[stri } defer func() { if r := recover(); r != nil { - log.WithField("operator.component", "metricsStorage"). - Errorf("Metric counter add %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) + m.logger.Errorf("Metric counter add %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) } }() m.Counter(metric, labels).With(labels).Add(value) @@ -190,8 +189,7 @@ func (m *MetricStorage) RegisterCounter(metric string, labels map[string]string) defer func() { if r := recover(); r != nil { - log.WithField("operator.component", "metricStorage"). - Errorf("Create metric counter %s %v with %v: %v", metricName, LabelNames(labels), labels, r) + m.logger.Errorf("Create metric counter %s %v with %v: %v", metricName, LabelNames(labels), labels, r) } }() @@ -203,8 +201,7 @@ func (m *MetricStorage) RegisterCounter(metric string, labels map[string]string) return vec } - log.WithField("operator.component", "metricStorage"). - Infof("Create metric counter %s", metricName) + m.logger.Infof("Create metric counter %s", metricName) vec = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: metricName, @@ -224,8 +221,7 @@ func (m *MetricStorage) HistogramObserve(metric string, value float64, labels ma } defer func() { if r := recover(); r != nil { - log.WithField("operator.component", "metricsStorage"). - Errorf("Metric histogram observe %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) + m.logger.Errorf("Metric histogram observe %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) } }() m.Histogram(metric, labels, buckets).With(labels).Observe(value) @@ -246,8 +242,7 @@ func (m *MetricStorage) RegisterHistogram(metric string, labels map[string]strin defer func() { if r := recover(); r != nil { - log.WithField("operator.component", "metricsStorage"). 
- Errorf("Create metric histogram %s %v with %v: %v", metricName, LabelNames(labels), labels, r) + m.logger.Errorf("Create metric histogram %s %v with %v: %v", metricName, LabelNames(labels), labels, r) } }() @@ -259,8 +254,7 @@ func (m *MetricStorage) RegisterHistogram(metric string, labels map[string]strin return vec } - log.WithField("operator.component", "metricsStorage"). - Infof("Create metric histogram %s", metricName) + m.logger.Infof("Create metric histogram %s", metricName) b, has := m.HistogramBuckets[metric] // This shouldn't happen except when entering this concurrently // If there are buckets for this histogram about to be registered, keep them diff --git a/pkg/metric_storage/vault/vault.go b/pkg/metric_storage/vault/vault.go index 6827306c..118107fe 100644 --- a/pkg/metric_storage/vault/vault.go +++ b/pkg/metric_storage/vault/vault.go @@ -2,26 +2,26 @@ package vault import ( "fmt" - "github.com/flant/shell-operator/pkg/metric" "sync" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" + "github.com/flant/shell-operator/pkg/metric" . "github.com/flant/shell-operator/pkg/utils/labels" ) type GroupedVault struct { - collectors map[string]metric.ConstCollector - mtx sync.Mutex - registerer prometheus.Registerer + collectors map[string]metric.ConstCollector + mtx sync.Mutex + registerer prometheus.Registerer resolveMetricNameFunc func(name string) string } func NewGroupedVault(resolveMetricNameFunc func(name string) string) *GroupedVault { return &GroupedVault{ - collectors: make(map[string]metric.ConstCollector), - resolveMetricNameFunc : resolveMetricNameFunc, + collectors: make(map[string]metric.ConstCollector), + resolveMetricNameFunc: resolveMetricNameFunc, } } diff --git a/pkg/metric_storage/vault/vault_test.go b/pkg/metric_storage/vault/vault_test.go index d24714e6..bc4fd2f1 100644 --- a/pkg/metric_storage/vault/vault_test.go +++ b/pkg/metric_storage/vault/vault_test.go @@ -5,19 +5,22 @@ import ( "strings" "testing" + "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" - log "github.com/sirupsen/logrus" ) func Test_CounterAdd(t *testing.T) { g := NewWithT(t) + logger := log.NewLogger(log.Options{}) + log.SetDefault(logger) + buf := &bytes.Buffer{} - log.SetOutput(buf) + logger.SetOutput(buf) - v := NewGroupedVault(func (name string) string { return name }) + v := NewGroupedVault(func(name string) string { return name }) v.registerer = prometheus.DefaultRegisterer v.CounterAdd("group1", "metric_total", 1.0, map[string]string{"lbl": "val"}) diff --git a/pkg/schedule_manager/schedule_manager.go b/pkg/schedule_manager/schedule_manager.go index fcf86917..6681b5bd 100644 --- a/pkg/schedule_manager/schedule_manager.go +++ b/pkg/schedule_manager/schedule_manager.go @@ -3,7 +3,7 @@ package schedule_manager import ( "context" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "gopkg.in/robfig/cron.v2" . 
"github.com/flant/shell-operator/pkg/schedule_manager/types" @@ -28,11 +28,13 @@ type scheduleManager struct { cron *cron.Cron ScheduleCh chan string Entries map[string]CronEntry + + logger *log.Logger } var _ ScheduleManager = &scheduleManager{} -func NewScheduleManager(ctx context.Context) *scheduleManager { +func NewScheduleManager(ctx context.Context, logger *log.Logger) *scheduleManager { cctx, cancel := context.WithCancel(ctx) sm := &scheduleManager{ ctx: cctx, @@ -40,6 +42,8 @@ func NewScheduleManager(ctx context.Context) *scheduleManager { ScheduleCh: make(chan string, 1), cron: cron.New(), Entries: make(map[string]CronEntry), + + logger: logger, } return sm } @@ -54,7 +58,7 @@ func (sm *scheduleManager) Stop() { // Crontab string should be validated with cron.Parse // function before pass to Add. func (sm *scheduleManager) Add(newEntry ScheduleEntry) { - logEntry := log.WithField("operator.component", "scheduleManager") + logEntry := sm.logger.With("operator.component", "scheduleManager") cronEntry, hasCronEntry := sm.Entries[newEntry.Crontab] @@ -105,7 +109,7 @@ func (sm *scheduleManager) Remove(delEntry ScheduleEntry) { if len(sm.Entries[delEntry.Crontab].Ids) == 0 { sm.cron.Remove(sm.Entries[delEntry.Crontab].EntryID) delete(sm.Entries, delEntry.Crontab) - log.WithField("operator.component", "scheduleManager").Debugf("entry '%s' deleted", delEntry.Crontab) + sm.logger.With("operator.component", "scheduleManager").Debugf("entry '%s' deleted", delEntry.Crontab) } } diff --git a/pkg/schedule_manager/schedule_manager_test.go b/pkg/schedule_manager/schedule_manager_test.go index bb5de61f..21efbc2b 100644 --- a/pkg/schedule_manager/schedule_manager_test.go +++ b/pkg/schedule_manager/schedule_manager_test.go @@ -4,11 +4,13 @@ import ( "context" "testing" + "github.com/deckhouse/deckhouse/pkg/log" + "github.com/flant/shell-operator/pkg/schedule_manager/types" ) func Test_ScheduleManager_Add(t *testing.T) { - sm := NewScheduleManager(context.Background()) + sm := NewScheduleManager(context.Background(), log.NewNop()) expectations := []struct { testName string @@ -37,7 +39,7 @@ func Test_ScheduleManager_Add(t *testing.T) { } for _, expectation := range expectations { - t.Run(expectation.testName, func(t *testing.T) { + t.Run(expectation.testName, func(_ *testing.T) { sm.Add(types.ScheduleEntry{Crontab: expectation.crontab, Id: expectation.id}) // if expectation.err != "" { diff --git a/pkg/shell-operator/bootstrap.go b/pkg/shell-operator/bootstrap.go index 4ae1fef9..cf4ccc4e 100644 --- a/pkg/shell-operator/bootstrap.go +++ b/pkg/shell-operator/bootstrap.go @@ -3,8 +3,9 @@ package shell_operator import ( "context" "fmt" + "log/slog" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/config" @@ -21,32 +22,33 @@ import ( // Init initialize logging, ensures directories and creates // a ShellOperator instance with all dependencies. -func Init() (*ShellOperator, error) { - runtimeConfig := config.NewConfig() +func Init(logger *log.Logger) (*ShellOperator, error) { + runtimeConfig := config.NewConfig(logger) // Init logging subsystem. - app.SetupLogging(runtimeConfig) + app.SetupLogging(runtimeConfig, logger) + // Log version and jq filtering implementation. 
- log.Infof(app.AppStartMessage) - log.Debug(jq.FilterInfo()) + logger.Info(app.AppStartMessage) + logger.Debug(jq.FilterInfo()) hooksDir, err := utils.RequireExistingDirectory(app.HooksDir) if err != nil { - log.Errorf("Fatal: hooks directory is required: %s", err) + logger.Log(context.TODO(), log.LevelFatal.Level(), "hooks directory is required", slog.String("error", err.Error())) return nil, err } tempDir, err := utils.EnsureTempDirectory(app.TempDir) if err != nil { - log.Errorf("Fatal: temp directory: %s", err) + logger.Log(context.TODO(), log.LevelFatal.Level(), "temp directory", slog.String("error", err.Error())) return nil, err } - op := NewShellOperator(context.Background()) + op := NewShellOperator(context.Background(), WithLogger(logger)) // Debug server. - debugServer, err := RunDefaultDebugServer(app.DebugUnixSocket, app.DebugHttpServerAddr) + debugServer, err := RunDefaultDebugServer(app.DebugUnixSocket, app.DebugHttpServerAddr, op.logger.Named("debug-server")) if err != nil { - log.Errorf("Fatal: start Debug server: %s", err) + logger.Log(context.TODO(), log.LevelFatal.Level(), "start Debug server", slog.String("error", err.Error())) return nil, err } @@ -56,13 +58,13 @@ func Init() (*ShellOperator, error) { "queue": "", }) if err != nil { - log.Errorf("Fatal: %s", err) + logger.Log(context.TODO(), log.LevelFatal.Level(), "essemble common operator", slog.String("error", err.Error())) return nil, err } err = op.assembleShellOperator(hooksDir, tempDir, debugServer, runtimeConfig) if err != nil { - log.Errorf("Fatal: %s", err) + logger.Log(context.TODO(), log.LevelFatal.Level(), "essemble shell operator", slog.String("error", err.Error())) return nil, err } @@ -88,7 +90,7 @@ func (op *ShellOperator) AssembleCommonOperator(listenAddress, listenPort string } // ObjectPatcher with a separate Kubernetes client. - op.ObjectPatcher, err = initDefaultObjectPatcher(op.MetricStorage) + op.ObjectPatcher, err = initDefaultObjectPatcher(op.MetricStorage, op.logger.Named("object-patcher")) if err != nil { return err } @@ -152,17 +154,18 @@ func (op *ShellOperator) SetupEventManagers() { op.TaskQueues.WithMetricStorage(op.MetricStorage) // Initialize schedule manager. - op.ScheduleManager = schedule_manager.NewScheduleManager(op.ctx) + op.ScheduleManager = schedule_manager.NewScheduleManager(op.ctx, op.logger.Named("schedule-manager")) // Initialize kubernetes events manager. 
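Editor's aside (illustrative only, not part of the diff): with the Init signature change above, an embedding program now owns the root logger and hands it to the bootstrap; Init no longer configures a global logrus instance, and each component receives a Named sub-logger from the one passed in. A minimal sketch of such a caller, assuming only the exported Init shown in this patch (the embedding package itself and its exit handling are hypothetical):

package main

import (
	"os"

	"github.com/deckhouse/deckhouse/pkg/log"

	shell_operator "github.com/flant/shell-operator/pkg/shell-operator"
)

func main() {
	// The caller creates and owns the root logger and passes it down.
	logger := log.NewLogger(log.Options{})

	op, err := shell_operator.Init(logger)
	if err != nil {
		// Init already reported the failure at fatal level, so just exit non-zero.
		os.Exit(1)
	}

	_ = op // starting the operator is omitted here; see the start command wiring elsewhere in this patch
}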
- op.KubeEventsManager = kube_events_manager.NewKubeEventsManager(op.ctx, op.KubeClient) + op.KubeEventsManager = kube_events_manager.NewKubeEventsManager(op.ctx, op.KubeClient, op.logger.Named("kube-events-manager")) op.KubeEventsManager.WithMetricStorage(op.MetricStorage) // Initialize events handler that emit tasks to run hooks cfg := &managerEventsHandlerConfig{ - tqs: op.TaskQueues, - mgr: op.KubeEventsManager, - smgr: op.ScheduleManager, + tqs: op.TaskQueues, + mgr: op.KubeEventsManager, + smgr: op.ScheduleManager, + logger: op.logger.Named("manager-events-handler"), } op.ManagerEventsHandler = newManagerEventsHandler(op.ctx, cfg) } @@ -188,6 +191,7 @@ func (op *ShellOperator) setupHookManagers(hooksDir string, tempDir string) { Smgr: op.ScheduleManager, Wmgr: op.AdmissionWebhookManager, Cmgr: op.ConversionWebhookManager, + Logger: op.logger.Named("hook-manager"), } op.HookManager = hook.NewHookManager(cfg) } diff --git a/pkg/shell-operator/combine_binding_context.go b/pkg/shell-operator/combine_binding_context.go index 4924d222..7638f4db 100644 --- a/pkg/shell-operator/combine_binding_context.go +++ b/pkg/shell-operator/combine_binding_context.go @@ -3,8 +3,6 @@ package shell_operator import ( "fmt" - log "github.com/sirupsen/logrus" - . "github.com/flant/shell-operator/pkg/hook/binding_context" . "github.com/flant/shell-operator/pkg/hook/task_metadata" "github.com/flant/shell-operator/pkg/task" @@ -24,7 +22,7 @@ type CombineResult struct { // If input task has no metadata, result will be nil. // Metadata should implement HookNameAccessor, BindingContextAccessor and MonitorIDAccessor interfaces. // DEV WARNING! Do not use HookMetadataAccessor here. Use only *Accessor interfaces because this method is used from addon-operator. -func combineBindingContextForHook(tqs *queue.TaskQueueSet, q *queue.TaskQueue, t task.Task, stopCombineFn func(tsk task.Task) bool) *CombineResult { +func (op *ShellOperator) combineBindingContextForHook(tqs *queue.TaskQueueSet, q *queue.TaskQueue, t task.Task, stopCombineFn func(tsk task.Task) bool) *CombineResult { if q == nil { return nil } @@ -120,7 +118,7 @@ func combineBindingContextForHook(tqs *queue.TaskQueueSet, q *queue.TaskQueue, t } else { compactMsg = fmt.Sprintf("are combined to %d contexts", len(combinedContext)) } - log.Infof("Binding contexts from %d tasks %s. %d tasks are dropped from queue '%s'", len(otherTasks)+1, compactMsg, len(tasksFilter)-1, t.GetQueueName()) + op.logger.Infof("Binding contexts from %d tasks %s. %d tasks are dropped from queue '%s'", len(otherTasks)+1, compactMsg, len(tasksFilter)-1, t.GetQueueName()) res.BindingContexts = compactedContext res.MonitorIDs = monitorIDs diff --git a/pkg/shell-operator/combine_binding_context_test.go b/pkg/shell-operator/combine_binding_context_test.go index b87d69b2..6caef60b 100644 --- a/pkg/shell-operator/combine_binding_context_test.go +++ b/pkg/shell-operator/combine_binding_context_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/deckhouse/deckhouse/pkg/log" . 
"github.com/onsi/gomega" "github.com/flant/shell-operator/pkg/hook/binding_context" @@ -19,7 +20,7 @@ func Test_CombineBindingContext_MultipleHooks(t *testing.T) { TaskQueues := queue.NewTaskQueueSet() TaskQueues.WithContext(context.Background()) - TaskQueues.NewNamedQueue("test_multiple_hooks", func(tsk task.Task) queue.TaskResult { + TaskQueues.NewNamedQueue("test_multiple_hooks", func(_ task.Task) queue.TaskResult { return queue.TaskResult{ Status: "Success", } @@ -98,7 +99,8 @@ func Test_CombineBindingContext_MultipleHooks(t *testing.T) { } g.Expect(TaskQueues.GetByName("test_multiple_hooks").Length()).Should(Equal(len(tasks))) - combineResult := combineBindingContextForHook(TaskQueues, TaskQueues.GetByName("test_multiple_hooks"), tasks[0], nil) + op := &ShellOperator{logger: log.NewNop()} + combineResult := op.combineBindingContextForHook(TaskQueues, TaskQueues.GetByName("test_multiple_hooks"), tasks[0], nil) // Should combine binding contexts from 4 tasks. g.Expect(combineResult).ShouldNot(BeNil()) @@ -112,7 +114,7 @@ func Test_CombineBindingContext_Nil_On_NoCombine(t *testing.T) { TaskQueues := queue.NewTaskQueueSet() TaskQueues.WithContext(context.Background()) - TaskQueues.NewNamedQueue("test_no_combine", func(tsk task.Task) queue.TaskResult { + TaskQueues.NewNamedQueue("test_no_combine", func(_ task.Task) queue.TaskResult { return queue.TaskResult{ Status: "Success", } @@ -158,7 +160,8 @@ func Test_CombineBindingContext_Nil_On_NoCombine(t *testing.T) { } g.Expect(TaskQueues.GetByName("test_no_combine").Length()).Should(Equal(len(tasks))) - combineResult := combineBindingContextForHook(TaskQueues, TaskQueues.GetByName("test_no_combine"), tasks[0], nil) + op := &ShellOperator{logger: log.NewNop()} + combineResult := op.combineBindingContextForHook(TaskQueues, TaskQueues.GetByName("test_no_combine"), tasks[0], nil) // Should return nil if no combine g.Expect(combineResult).Should(BeNil()) // Should not delete tasks @@ -170,7 +173,7 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { TaskQueues := queue.NewTaskQueueSet() TaskQueues.WithContext(context.Background()) - TaskQueues.NewNamedQueue("test_multiple_hooks", func(tsk task.Task) queue.TaskResult { + TaskQueues.NewNamedQueue("test_multiple_hooks", func(_ task.Task) queue.TaskResult { return queue.TaskResult{ Status: "Success", } @@ -257,7 +260,8 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { } g.Expect(TaskQueues.GetByName("test_multiple_hooks").Length()).Should(Equal(len(tasks))) - combineResult := combineBindingContextForHook(TaskQueues, TaskQueues.GetByName("test_multiple_hooks"), tasks[0], nil) + op := &ShellOperator{logger: log.NewNop()} + combineResult := op.combineBindingContextForHook(TaskQueues, TaskQueues.GetByName("test_multiple_hooks"), tasks[0], nil) // Should compact 4 tasks into 4 binding context and combine 3 binding contexts into one. 
g.Expect(combineResult).ShouldNot(BeNil()) g.Expect(combineResult.BindingContexts).Should(HaveLen(2)) @@ -271,7 +275,7 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { TaskQueues := queue.NewTaskQueueSet() TaskQueues.WithContext(context.Background()) - TaskQueues.NewNamedQueue("test_multiple_hooks", func(tsk task.Task) queue.TaskResult { + TaskQueues.NewNamedQueue("test_multiple_hooks", func(_ task.Task) queue.TaskResult { return queue.TaskResult{ Status: "Success", } @@ -406,7 +410,8 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { } g.Expect(TaskQueues.GetByName("test_multiple_hooks").Length()).Should(Equal(len(tasks))) - combineResult := combineBindingContextForHook(TaskQueues, TaskQueues.GetByName("test_multiple_hooks"), tasks[0], nil) + op := &ShellOperator{logger: log.NewNop()} + combineResult := op.combineBindingContextForHook(TaskQueues, TaskQueues.GetByName("test_multiple_hooks"), tasks[0], nil) // Should leave 3 tasks in queue. g.Expect(combineResult).ShouldNot(BeNil()) g.Expect(TaskQueues.GetByName("test_multiple_hooks").Length()).Should(Equal(3)) diff --git a/pkg/shell-operator/debug_server.go b/pkg/shell-operator/debug_server.go index 82e879e3..0bbef5d4 100644 --- a/pkg/shell-operator/debug_server.go +++ b/pkg/shell-operator/debug_server.go @@ -6,6 +6,7 @@ import ( "strconv" "time" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/go-chi/chi/v5" "github.com/flant/shell-operator/pkg/config" @@ -15,8 +16,8 @@ import ( // RunDefaultDebugServer initialized and run default debug server on unix and http sockets // This method is also used in addon-operator -func RunDefaultDebugServer(unixSocket, httpServerAddress string) (*debug.Server, error) { - dbgSrv := debug.NewServer("/debug", unixSocket, httpServerAddress) +func RunDefaultDebugServer(unixSocket, httpServerAddress string, logger *log.Logger) (*debug.Server, error) { + dbgSrv := debug.NewServer("/debug", unixSocket, httpServerAddress, logger) dbgSrv.RegisterHandler(http.MethodGet, "/", func(_ *http.Request) (interface{}, error) { return "debug endpoint is alive", nil diff --git a/pkg/shell-operator/http_server.go b/pkg/shell-operator/http_server.go index 2d145fad..d9a9be31 100644 --- a/pkg/shell-operator/http_server.go +++ b/pkg/shell-operator/http_server.go @@ -8,9 +8,9 @@ import ( "strings" "time" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" - log "github.com/sirupsen/logrus" "github.com/flant/shell-operator/pkg/app" ) @@ -77,9 +77,9 @@ func newBaseHTTPServer(address, port string) *baseHTTPServer { // inject pprof router.Mount("/debug", middleware.Profiler()) - router.Get("/discovery", func(writer http.ResponseWriter, request *http.Request) { + router.Get("/discovery", func(writer http.ResponseWriter, _ *http.Request) { buf := bytes.NewBuffer(nil) - walkFn := func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error { + walkFn := func(method string, route string, _ http.Handler, _ ...func(http.Handler) http.Handler) error { // skip pprof routes if strings.HasPrefix(route, "/debug/") { return nil @@ -110,7 +110,7 @@ func newBaseHTTPServer(address, port string) *baseHTTPServer { } func registerRootRoute(op *ShellOperator) { - op.APIServer.RegisterRoute(http.MethodGet, "/", func(writer http.ResponseWriter, request *http.Request) { + op.APIServer.RegisterRoute(http.MethodGet, "/", func(writer http.ResponseWriter, _ *http.Request) { _, _ = fmt.Fprintf(writer, ` Shell operator diff --git 
a/pkg/shell-operator/kube_client.go b/pkg/shell-operator/kube_client.go index 3095cb96..59287fb4 100644 --- a/pkg/shell-operator/kube_client.go +++ b/pkg/shell-operator/kube_client.go @@ -3,6 +3,8 @@ package shell_operator import ( "fmt" + "github.com/deckhouse/deckhouse/pkg/log" + klient "github.com/flant/kube-client/client" "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/kube/object_patch" @@ -50,11 +52,11 @@ func defaultObjectPatcherKubeClient(metricStorage *metric_storage.MetricStorage, return client } -func initDefaultObjectPatcher(metricStorage *metric_storage.MetricStorage) (*object_patch.ObjectPatcher, error) { +func initDefaultObjectPatcher(metricStorage *metric_storage.MetricStorage, logger *log.Logger) (*object_patch.ObjectPatcher, error) { patcherKubeClient := defaultObjectPatcherKubeClient(metricStorage, defaultObjectPatcherKubeClientMetricLabels) err := patcherKubeClient.Init() if err != nil { return nil, fmt.Errorf("initialize Kubernetes client for Object patcher: %s\n", err) } - return object_patch.NewObjectPatcher(patcherKubeClient), nil + return object_patch.NewObjectPatcher(patcherKubeClient, logger), nil } diff --git a/pkg/shell-operator/manager_events_handler.go b/pkg/shell-operator/manager_events_handler.go index 63e53aba..643c8de0 100644 --- a/pkg/shell-operator/manager_events_handler.go +++ b/pkg/shell-operator/manager_events_handler.go @@ -3,7 +3,7 @@ package shell_operator import ( "context" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/shell-operator/pkg/kube_events_manager" . "github.com/flant/shell-operator/pkg/kube_events_manager/types" @@ -16,6 +16,8 @@ type managerEventsHandlerConfig struct { tqs *queue.TaskQueueSet mgr kube_events_manager.KubeEventsManager smgr schedule_manager.ScheduleManager + + logger *log.Logger } type ManagerEventsHandler struct { @@ -29,6 +31,8 @@ type ManagerEventsHandler struct { scheduleCb func(crontab string) []task.Task taskQueues *queue.TaskQueueSet + + logger *log.Logger } func newManagerEventsHandler(ctx context.Context, cfg *managerEventsHandlerConfig) *ManagerEventsHandler { @@ -43,6 +47,7 @@ func newManagerEventsHandler(ctx context.Context, cfg *managerEventsHandlerConfi scheduleManager: cfg.smgr, kubeEventsManager: cfg.mgr, taskQueues: cfg.tqs, + logger: cfg.logger, } } @@ -63,7 +68,7 @@ func (m *ManagerEventsHandler) Start() { go func() { for { var tailTasks []task.Task - logEntry := log.WithField("operator.component", "handleEvents") + logEntry := m.logger.With("operator.component", "handleEvents") select { case crontab := <-m.scheduleManager.Ch(): if m.scheduleCb != nil { diff --git a/pkg/shell-operator/metrics_hooks.go b/pkg/shell-operator/metrics_hooks.go index b0af5c19..24e2bfa5 100644 --- a/pkg/shell-operator/metrics_hooks.go +++ b/pkg/shell-operator/metrics_hooks.go @@ -8,7 +8,7 @@ import ( ) func (op *ShellOperator) setupHookMetricStorage() { - metricStorage := metric_storage.NewMetricStorage(op.ctx, app.PrometheusMetricsPrefix, true) + metricStorage := metric_storage.NewMetricStorage(op.ctx, app.PrometheusMetricsPrefix, true, op.logger.Named("metric-storage")) op.APIServer.RegisterRoute(http.MethodGet, "/metrics/hooks", metricStorage.Handler().ServeHTTP) // create new metric storage for hooks diff --git a/pkg/shell-operator/metrics_operator.go b/pkg/shell-operator/metrics_operator.go index 18e55c87..021335a9 100644 --- a/pkg/shell-operator/metrics_operator.go +++ b/pkg/shell-operator/metrics_operator.go @@ -9,7 +9,7 @@ import ( // 
setupMetricStorage creates and initializes metrics storage for built-in operator metrics func (op *ShellOperator) setupMetricStorage(kubeEventsManagerLabels map[string]string) { - metricStorage := metric_storage.NewMetricStorage(op.ctx, app.PrometheusMetricsPrefix, false) + metricStorage := metric_storage.NewMetricStorage(op.ctx, app.PrometheusMetricsPrefix, false, op.logger.Named("metric-storage")) registerCommonMetrics(metricStorage) registerTaskQueueMetrics(metricStorage) diff --git a/pkg/shell-operator/operator.go b/pkg/shell-operator/operator.go index a3ed4321..73306142 100644 --- a/pkg/shell-operator/operator.go +++ b/pkg/shell-operator/operator.go @@ -5,8 +5,8 @@ import ( "fmt" "time" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/gofrs/uuid/v5" - log "github.com/sirupsen/logrus" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" klient "github.com/flant/kube-client/client" @@ -34,6 +34,8 @@ type ShellOperator struct { ctx context.Context cancel context.CancelFunc + logger *log.Logger + // APIServer common http server for liveness and metrics endpoints APIServer *baseHTTPServer @@ -57,15 +59,35 @@ type ShellOperator struct { ConversionWebhookManager *conversion.WebhookManager } -func NewShellOperator(ctx context.Context) *ShellOperator { +type Option func(operator *ShellOperator) + +func WithLogger(logger *log.Logger) Option { + return func(operator *ShellOperator) { + operator.logger = logger + } +} + +func NewShellOperator(ctx context.Context, opts ...Option) *ShellOperator { if ctx == nil { ctx = context.Background() } + cctx, cancel := context.WithCancel(ctx) - return &ShellOperator{ + + so := &ShellOperator{ ctx: cctx, cancel: cancel, } + + for _, opt := range opts { + opt(so) + } + + if so.logger == nil { + so.logger = log.NewLogger(log.Options{}).Named("shell-operator") + } + + return so } // Start run the operator @@ -115,7 +137,7 @@ func (op *ShellOperator) initHookManager() (err error) { "event.id": uuid.Must(uuid.NewV4()).String(), "binding": string(types.OnKubernetesEvent), } - logEntry := log.WithFields(utils.LabelsToLogFields(logLabels)) + logEntry := utils.EnrichLoggerWithLabels(op.logger, logLabels) logEntry.Debugf("Create tasks for 'kubernetes' event '%s'", kubeEvent.String()) var tasks []task.Task @@ -133,7 +155,7 @@ func (op *ShellOperator) initHookManager() (err error) { WithQueueName(info.QueueName) tasks = append(tasks, newTask.WithQueuedAt(time.Now())) - logEntry.WithField("queue", info.QueueName). + logEntry.With("queue", info.QueueName). Infof("queue task %s", newTask.GetDescription()) }) @@ -144,7 +166,7 @@ func (op *ShellOperator) initHookManager() (err error) { "event.id": uuid.Must(uuid.NewV4()).String(), "binding": string(types.Schedule), } - logEntry := log.WithFields(utils.LabelsToLogFields(logLabels)) + logEntry := utils.EnrichLoggerWithLabels(op.logger, logLabels) logEntry.Debugf("Create tasks for 'schedule' event '%s'", crontab) var tasks []task.Task @@ -162,7 +184,7 @@ func (op *ShellOperator) initHookManager() (err error) { WithQueueName(info.QueueName) tasks = append(tasks, newTask.WithQueuedAt(time.Now())) - logEntry.WithField("queue", info.QueueName). + logEntry.With("queue", info.QueueName). 
Infof("queue task %s", newTask.GetDescription()) }) @@ -209,7 +231,7 @@ func (op *ShellOperator) initValidatingWebhookManager() (err error) { "event.id": uuid.Must(uuid.NewV4()).String(), "event": string(eventBindingType), } - logEntry := log.WithFields(utils.LabelsToLogFields(logLabels)) + logEntry := utils.EnrichLoggerWithLabels(op.logger, logLabels) logEntry.Debugf("Handle '%s' event '%s' '%s'", eventBindingType, event.ConfigurationId, event.WebhookId) var admissionTask task.Task @@ -297,7 +319,7 @@ func (op *ShellOperator) conversionEventHandler(crdName string, request *v1.Conv "event.id": uuid.Must(uuid.NewV4()).String(), "binding": string(types.KubernetesConversion), } - logEntry := log.WithFields(utils.LabelsToLogFields(logLabels)) + logEntry := utils.EnrichLoggerWithLabels(op.logger, logLabels) sourceVersions := conversion.ExtractAPIVersions(request.Objects) logEntry.Infof("Handle '%s' event for crd/%s: %d objects with versions %v", string(types.KubernetesConversion), crdName, len(request.Objects), sourceVersions) @@ -382,7 +404,7 @@ func (op *ShellOperator) conversionEventHandler(crdName string, request *v1.Conv // taskHandler func (op *ShellOperator) taskHandler(t task.Task) queue.TaskResult { - logEntry := log.WithField("operator.component", "taskRunner") + logEntry := op.logger.With("operator.component", "taskRunner") hookMeta := task_metadata.HookMetadataAccessor(t) var res queue.TaskResult @@ -400,7 +422,7 @@ func (op *ShellOperator) taskHandler(t task.Task) queue.TaskResult { hookLogLabels["task"] = "EnableScheduleBindings" hookLogLabels["queue"] = "main" - taskLogEntry := logEntry.WithFields(utils.LabelsToLogFields(hookLogLabels)) + taskLogEntry := utils.EnrichLoggerWithLabels(logEntry, hookLogLabels) taskHook := op.HookManager.GetHook(hookMeta.HookName) taskHook.HookController.EnableScheduleBindings() @@ -429,7 +451,7 @@ func (op *ShellOperator) taskHandleEnableKubernetesBindings(t task.Task) queue.T hookLogLabels["task"] = "EnableKubernetesBindings" hookLogLabels["queue"] = "main" - taskLogEntry := log.WithFields(utils.LabelsToLogFields(hookLogLabels)) + taskLogEntry := utils.EnrichLoggerWithLabels(op.logger, hookLogLabels) taskLogEntry.Info("Enable kubernetes binding for hook") @@ -511,7 +533,7 @@ func (op *ShellOperator) taskHandleHookRun(t task.Task) queue.TaskResult { hookLogLabels["task"] = "HookRun" hookLogLabels["queue"] = t.GetQueueName() - taskLogEntry := log.WithFields(utils.LabelsToLogFields(hookLogLabels)) + taskLogEntry := utils.EnrichLoggerWithLabels(op.logger, hookLogLabels) isSynchronization := hookMeta.IsSynchronization() shouldRunHook := true @@ -536,7 +558,7 @@ func (op *ShellOperator) taskHandleHookRun(t task.Task) queue.TaskResult { } } if shouldCombine { - combineResult := combineBindingContextForHook(op.TaskQueues, op.TaskQueues.GetByName(t.GetQueueName()), t, nil) + combineResult := op.combineBindingContextForHook(op.TaskQueues, op.TaskQueues.GetByName(t.GetQueueName()), t, nil) if combineResult != nil { hookMeta.BindingContext = combineResult.BindingContexts // Extra monitor IDs can be returned if several Synchronization for Group are combined. 
@@ -592,7 +614,7 @@ func (op *ShellOperator) taskHandleHookRun(t task.Task) queue.TaskResult { return res } -func (op *ShellOperator) handleRunHook(t task.Task, taskHook *hook.Hook, hookMeta task_metadata.HookMetadata, taskLogEntry *log.Entry, hookLogLabels map[string]string, metricLabels map[string]string) error { +func (op *ShellOperator) handleRunHook(t task.Task, taskHook *hook.Hook, hookMeta task_metadata.HookMetadata, taskLogEntry *log.Logger, hookLogLabels map[string]string, metricLabels map[string]string) error { for _, info := range taskHook.HookController.SnapshotsInfo() { taskLogEntry.Debugf("snapshot info: %s", info) } @@ -769,7 +791,7 @@ func (op *ShellOperator) CombineBindingContextForHook(q *queue.TaskQueue, t task // bootstrapMainQueue adds tasks to run hooks with OnStartup bindings // and tasks to enable kubernetes bindings. func (op *ShellOperator) bootstrapMainQueue(tqs *queue.TaskQueueSet) { - logEntry := log.WithField("operator.component", "initMainQueue") + logEntry := op.logger.With("operator.component", "initMainQueue") // Prepopulate main queue with 'onStartup' tasks and 'enable kubernetes bindings' tasks. tqs.WithMainName("main") diff --git a/pkg/shell-operator/operator_test.go b/pkg/shell-operator/operator_test.go index 71d711c0..5e8ee610 100644 --- a/pkg/shell-operator/operator_test.go +++ b/pkg/shell-operator/operator_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" . "github.com/flant/shell-operator/pkg/hook/task_metadata" @@ -18,7 +19,7 @@ func Test_Operator_startup_tasks(t *testing.T) { hooksDir, err := utils.RequireExistingDirectory("testdata/startup_tasks/hooks") g.Expect(err).ShouldNot(HaveOccurred()) - op := NewShellOperator(context.Background()) + op := NewShellOperator(context.Background(), WithLogger(log.NewNop())) op.SetupEventManagers() op.setupHookManagers(hooksDir, "") diff --git a/pkg/task/dump/dump_test.go b/pkg/task/dump/dump_test.go index dd571e07..062d97e6 100644 --- a/pkg/task/dump/dump_test.go +++ b/pkg/task/dump/dump_test.go @@ -74,7 +74,7 @@ func Test_Dump(t *testing.T) { tqs.WithContext(context.Background()) // Empty set, no queues, dump should not fail. - t.Run("empty set", func(t *testing.T) { + t.Run("empty set", func(_ *testing.T) { dump := testDumpQueuesWrapper(tqs, "text", true) g.Expect(dump).To(ContainSubstring("No queues")) }) @@ -94,7 +94,7 @@ func Test_Dump(t *testing.T) { g.Expect(dump).To(MatchJSON(`{"active":[],"summary":{"mainQueueTasks":0,"otherQueues":{"active":0,"empty":0,"tasks":0},"totalTasks":0}}`)) }) - t.Run("main queue as active", func(t *testing.T) { + t.Run("main queue as active", func(_ *testing.T) { fillQueue(tqs.GetMain(), mainTasks) // Main queue is not counted as active or empty queue. @@ -104,7 +104,7 @@ func Test_Dump(t *testing.T) { }) // Create and fill active queue. 
- t.Run("fill active queue", func(t *testing.T) { + t.Run("fill active queue", func(_ *testing.T) { tqs.NewNamedQueue("active-queue", nil) fillQueue(tqs.GetByName("active-queue"), activeTasks) @@ -128,7 +128,7 @@ func Test_Dump(t *testing.T) { g.Expect(dump).To(MatchJSON(`{"active":[{"name":"main","tasksCount":5,"tasks":[{"index":1,"description":":::test_task_main_0004"},{"index":2,"description":":::test_task_main_0003"},{"index":3,"description":":::test_task_main_0002"},{"index":4,"description":":::test_task_main_0001"},{"index":5,"description":":::test_task_main_0000"}]},{"name":"active-queue","tasksCount":4,"tasks":[{"index":1,"description":":::test_task_active-queue_0003"},{"index":2,"description":":::test_task_active-queue_0002"},{"index":3,"description":":::test_task_active-queue_0001"},{"index":4,"description":":::test_task_active-queue_0000"}]}],"empty":[{"name":"empty","tasksCount":0}],"summary":{"mainQueueTasks":5,"otherQueues":{"active":1,"empty":1,"tasks":4},"totalTasks":9}}`)) }) - t.Run("omit empty queue", func(t *testing.T) { + t.Run("omit empty queue", func(_ *testing.T) { dump := testDumpQueuesWrapper(tqs, "text", false) g.Expect(dump).To(ContainSubstring("1 active")) g.Expect(dump).ToNot(ContainSubstring("Empty queues (1):")) diff --git a/pkg/task/queue/task_queue.go b/pkg/task/queue/task_queue.go index a976d0b7..32019550 100644 --- a/pkg/task/queue/task_queue.go +++ b/pkg/task/queue/task_queue.go @@ -8,7 +8,7 @@ import ( "sync" "time" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/shell-operator/pkg/metric_storage" "github.com/flant/shell-operator/pkg/task" diff --git a/pkg/task/queue/task_queue_test.go b/pkg/task/queue/task_queue_test.go index 7f967dc2..52d5d645 100644 --- a/pkg/task/queue/task_queue_test.go +++ b/pkg/task/queue/task_queue_test.go @@ -114,7 +114,7 @@ func Test_ExponentialBackoff(t *testing.T) { // Set exponential backoff to the constant delay just to wait more than DelayOnQueueIsEmpty. // It is a test of delaying between task runs, not a test of exponential distribution. mockExponentialDelay := 30 * time.Millisecond - q.ExponentialBackoffFn = func(failureCount int) time.Duration { + q.ExponentialBackoffFn = func(_ int) time.Duration { return mockExponentialDelay } @@ -201,7 +201,7 @@ func Test_CancelDelay(t *testing.T) { // Set exponential backoff to the constant delay just to wait more than DelayOnQueueIsEmpty. // It is a test of delaying between task runs, not a test of exponential distribution. mockExponentialDelay := 150 * time.Millisecond - q.ExponentialBackoffFn = func(failureCount int) time.Duration { + q.ExponentialBackoffFn = func(_ int) time.Duration { return mockExponentialDelay } diff --git a/pkg/utils/exponential_backoff/delay.go b/pkg/utils/exponential_backoff/delay.go index 6178228b..ccb87799 100644 --- a/pkg/utils/exponential_backoff/delay.go +++ b/pkg/utils/exponential_backoff/delay.go @@ -2,7 +2,7 @@ package exponential_backoff import ( "math" - "math/rand" + "math/rand/v2" "time" ) @@ -52,7 +52,7 @@ func CalculateDelayWithMax(initialDelay time.Duration, maxDelay time.Duration, r } // Random addition to delay. 
- rndDelayNs := rand.Int63n(ExponentialDelayRandomMs) * int64(time.Millisecond) + rndDelayNs := rand.Int64N(ExponentialDelayRandomMs) * int64(time.Millisecond) delay := initialDelay + time.Duration(delayNs) + time.Duration(rndDelayNs) delay = delay.Truncate(100 * time.Millisecond) diff --git a/pkg/utils/exponential_backoff/delay_test.go b/pkg/utils/exponential_backoff/delay_test.go index b731febe..d43256a6 100644 --- a/pkg/utils/exponential_backoff/delay_test.go +++ b/pkg/utils/exponential_backoff/delay_test.go @@ -1,7 +1,6 @@ package exponential_backoff import ( - "math/rand" "testing" "time" ) @@ -9,7 +8,6 @@ import ( func Test_Delay(t *testing.T) { var initialSeconds int64 = 5 initialDelay := time.Duration(5) * time.Second - rand.Seed(time.Now().UnixNano()) for i := 0; i < 64; i++ { delay := CalculateDelay(initialDelay, i) seconds := int64(delay.Seconds()) diff --git a/pkg/utils/file/file.go b/pkg/utils/file/file.go index 479564fd..5c11c94e 100644 --- a/pkg/utils/file/file.go +++ b/pkg/utils/file/file.go @@ -5,7 +5,7 @@ import ( "path/filepath" "strings" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" ) // FileExists returns true if path exists diff --git a/pkg/utils/file/file_test.go b/pkg/utils/file/file_test.go index a4abf821..20b9c49e 100644 --- a/pkg/utils/file/file_test.go +++ b/pkg/utils/file/file_test.go @@ -7,8 +7,10 @@ import ( "path/filepath" "strings" "testing" + "time" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" + "github.com/stretchr/testify/assert" ) func prepareTestDirTree() (string, error) { @@ -119,23 +121,29 @@ func TestRecursiveCheckLibDirectory(t *testing.T) { } for _, tt := range tests { var buf bytes.Buffer - log.SetOutput(&buf) - formatter := new(log.TextFormatter) - formatter.DisableColors = true - formatter.DisableTimestamp = true - log.SetFormatter(formatter) + logger := log.NewLogger(log.Options{ + Output: &buf, + TimeFunc: func(_ time.Time) time.Time { + parsedTime, err := time.Parse(time.DateTime, "2006-01-02 15:04:05") + if err != nil { + assert.NoError(t, err) + } + + return parsedTime + }, + }) + + log.SetDefault(logger) t.Run(tt.name, func(t *testing.T) { if err := RecursiveCheckLibDirectory(tt.args.dir); (err != nil) != tt.wantErr { t.Errorf("RecursiveCheckLibDirectory() error = %v, wantErr %v", err, tt.wantErr) } - if strings.Compare( - strings.TrimSpace(buf.String()), - `level=warning msg="File '/lib.py' has executable permissions and is located in the ignored 'lib' directory"`) != 0 { - t.Errorf("RecursiveCheckLibDirectory() error, got `%v`", buf.String()) - } + assert.Equal(t, + buf.String(), + `{"level":"warn","msg":"File '/lib.py' has executable permissions and is located in the ignored 'lib' directory","time":"2006-01-02T15:04:05Z"}`+"\n") }) } diff --git a/pkg/utils/labels/labels.go b/pkg/utils/labels/labels.go index 0743773d..47dabd52 100644 --- a/pkg/utils/labels/labels.go +++ b/pkg/utils/labels/labels.go @@ -1,9 +1,10 @@ package utils import ( + "log/slog" "sort" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" ) // MergeLabels merges several maps into one. Last map keys overrides keys from first maps. 
@@ -19,14 +20,16 @@ func MergeLabels(labelsMaps ...map[string]string) map[string]string { return labels } -func LabelsToLogFields(labelsMaps ...map[string]string) log.Fields { - fields := log.Fields{} +func EnrichLoggerWithLabels(logger *log.Logger, labelsMaps ...map[string]string) *log.Logger { + loggerEntry := logger + for _, labels := range labelsMaps { for k, v := range labels { - fields[k] = v + loggerEntry = loggerEntry.With(slog.String(k, v)) } } - return fields + + return loggerEntry } // LabelNames returns sorted label keys diff --git a/pkg/utils/signal/signal.go b/pkg/utils/signal/signal.go index c710e943..1000f795 100644 --- a/pkg/utils/signal/signal.go +++ b/pkg/utils/signal/signal.go @@ -5,7 +5,7 @@ import ( "os/signal" "syscall" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" ) // WaitForProcessInterruption wait for SIGINT or SIGTERM and run a callback function. diff --git a/pkg/utils/structured-logger/structured-logger.go b/pkg/utils/structured-logger/structured-logger.go deleted file mode 100644 index 2383f475..00000000 --- a/pkg/utils/structured-logger/structured-logger.go +++ /dev/null @@ -1,85 +0,0 @@ -package structured_logger - -import ( - "fmt" - "net/http" - "time" - - "github.com/go-chi/chi/v5/middleware" - "github.com/sirupsen/logrus" -) - -// StructuredLogger is a simple, but powerful implementation of a custom structured -// logger backed on logrus. It is adapted from https://github.com/go-chi/chi -// 'logging' example. - -func NewStructuredLogger(logger *logrus.Logger, componentLabel string) func(next http.Handler) http.Handler { - return middleware.RequestLogger(&StructuredLogger{ - logger, - componentLabel, - }) -} - -type StructuredLogger struct { - Logger *logrus.Logger - ComponentLabel string -} - -func (l *StructuredLogger) NewLogEntry(r *http.Request) middleware.LogEntry { - entry := &StructuredLoggerEntry{Logger: logrus.NewEntry(l.Logger)} - logFields := logrus.Fields{} - logFields["operator.component"] = l.ComponentLabel - logFields["http_method"] = r.Method - logFields["uri"] = r.RequestURI - - entry.Logger = entry.Logger.WithFields(logFields) - // entry.Logger.Infoln("request started") - - return entry -} - -type StructuredLoggerEntry struct { - Logger logrus.FieldLogger -} - -func (l *StructuredLoggerEntry) Write(status, bytes int, _ http.Header, elapsed time.Duration, _ interface{}) { - l.Logger = l.Logger.WithFields(logrus.Fields{ - "resp_status": status, - "resp_bytes_length": bytes, - "resp_elapsed_ms": float64(elapsed.Truncate(10*time.Microsecond)) / 100.0, - }) - - l.Logger.Infoln("complete") -} - -// This will log panics to log -func (l *StructuredLoggerEntry) Panic(v interface{}, stack []byte) { - l.Logger = l.Logger.WithFields(logrus.Fields{ - "stack": string(stack), - "panic": fmt.Sprintf("%+v", v), - }) -} - -// Helper methods used by the application to get the request-scoped -// logger entry and set additional fields between handlers. -// -// This is a useful pattern to use to set state on the entry as it -// passes through the handler chain, which at any point can be logged -// with a call to .Print(), .Info(), etc. 
- -func GetLogEntry(r *http.Request) logrus.FieldLogger { - entry := middleware.GetLogEntry(r).(*StructuredLoggerEntry) - return entry.Logger -} - -func LogEntrySetField(r *http.Request, key string, value interface{}) { - if entry, ok := r.Context().Value(middleware.LogEntryCtxKey).(*StructuredLoggerEntry); ok { - entry.Logger = entry.Logger.WithField(key, value) - } -} - -func LogEntrySetFields(r *http.Request, fields map[string]interface{}) { - if entry, ok := r.Context().Value(middleware.LogEntryCtxKey).(*StructuredLoggerEntry); ok { - entry.Logger = entry.Logger.WithFields(fields) - } -} diff --git a/pkg/utils/structured-logger/structured_logger.go b/pkg/utils/structured-logger/structured_logger.go new file mode 100644 index 00000000..10d4d490 --- /dev/null +++ b/pkg/utils/structured-logger/structured_logger.go @@ -0,0 +1,87 @@ +package structuredlogger + +import ( + "fmt" + "log/slog" + "net/http" + "time" + + "github.com/deckhouse/deckhouse/pkg/log" + "github.com/go-chi/chi/v5/middleware" +) + +// StructuredLogger is a simple, but powerful implementation of a custom structured +// logger backed on slog. It is adapted from https://github.com/go-chi/chi +// 'logging' example. + +func NewStructuredLogger(logger *log.Logger, componentLabel string) func(next http.Handler) http.Handler { + return middleware.RequestLogger(&StructuredLogger{ + logger, + componentLabel, + }) +} + +type StructuredLogger struct { + Logger *log.Logger + ComponentLabel string +} + +func (l *StructuredLogger) NewLogEntry(r *http.Request) middleware.LogEntry { + entry := &StructuredLoggerEntry{Logger: log.NewLogger(log.Options{})} + + entry.Logger = entry.Logger.With( + // TODO: make snake_case + slog.String("operator.component", l.ComponentLabel), + slog.String("http_method", r.Method), + slog.String("uri", r.RequestURI), + ) + // entry.Logger.Info("request started") + + return entry +} + +type StructuredLoggerEntry struct { + Logger *log.Logger +} + +func (l *StructuredLoggerEntry) Write(status, bytes int, _ http.Header, elapsed time.Duration, _ interface{}) { + l.Logger = l.Logger.With( + slog.Int("resp_status", status), + slog.Int("resp_bytes_length", bytes), + slog.Float64("resp_elapsed_ms", float64(elapsed.Truncate(10*time.Microsecond))/100.0), + ) + + l.Logger.Info("complete") +} + +// This will log panics to log +func (l *StructuredLoggerEntry) Panic(v interface{}, stack []byte) { + l.Logger = l.Logger.With( + slog.String("stack", string(stack)), + slog.String("panic", fmt.Sprintf("%+v", v)), + ) +} + +// Helper methods used by the application to get the request-scoped +// logger entry and set additional fields between handlers. +// +// This is a useful pattern to use to set state on the entry as it +// passes through the handler chain, which at any point can be logged +// with a call to .Print(), .Info(), etc. 
+ +func GetLogEntry(r *http.Request) *log.Logger { + entry := middleware.GetLogEntry(r).(*StructuredLoggerEntry) + return entry.Logger +} + +// func LogEntrySetField(r *http.Request, key string, value interface{}) { +// if entry, ok := r.Context().Value(middleware.LogEntryCtxKey).(*StructuredLoggerEntry); ok { +// entry.Logger = entry.Logger.WithField(key, value) +// } +// } + +// func LogEntrySetFields(r *http.Request, fields map[string]interface{}) { +// if entry, ok := r.Context().Value(middleware.LogEntryCtxKey).(*StructuredLoggerEntry); ok { +// entry.Logger = entry.Logger.WithFields(fields) +// } +// } diff --git a/pkg/webhook/admission/handler.go b/pkg/webhook/admission/handler.go index 44c3d296..e5a56482 100644 --- a/pkg/webhook/admission/handler.go +++ b/pkg/webhook/admission/handler.go @@ -3,16 +3,17 @@ package admission import ( "encoding/json" "fmt" + "log/slog" "net/http" "strings" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" - log "github.com/sirupsen/logrus" v1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - structured_logger "github.com/flant/shell-operator/pkg/utils/structured-logger" + structuredLogger "github.com/flant/shell-operator/pkg/utils/structured-logger" ) type EventHandlerFn func(event Event) (*Response, error) @@ -35,7 +36,7 @@ func NewWebhookHandler() *WebhookHandler { }) rtr.Group(func(r chi.Router) { - r.Use(structured_logger.NewStructuredLogger(log.StandardLogger(), "admissionWebhook")) + r.Use(structuredLogger.NewStructuredLogger(log.NewLogger(log.Options{}).Named("admissionWebhook"), "admissionWebhook")) r.Use(middleware.Recoverer) r.Use(middleware.AllowContentType("application/json")) r.Post("/*", h.serveReviewRequest) @@ -63,7 +64,7 @@ func (h *WebhookHandler) serveReviewRequest(w http.ResponseWriter, r *http.Reque admissionResponse, err := h.handleReviewRequest(r.URL.Path, admissionReview.Request) if err != nil { - log.Error(err, "validation failed", "request", admissionReview.Request.UID) + log.Error("validation failed", "request", admissionReview.Request.UID, slog.String("error", err.Error())) admissionReview.Response = errored(err) } else { admissionReview.Response = admissionResponse diff --git a/pkg/webhook/admission/manager.go b/pkg/webhook/admission/manager.go index 5c9e18d6..b34459c0 100644 --- a/pkg/webhook/admission/manager.go +++ b/pkg/webhook/admission/manager.go @@ -3,7 +3,7 @@ package admission import ( "os" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" klient "github.com/flant/kube-client/client" "github.com/flant/shell-operator/pkg/webhook/server" diff --git a/pkg/webhook/admission/resource.go b/pkg/webhook/admission/resource.go index 5932d6eb..d1df11b7 100644 --- a/pkg/webhook/admission/resource.go +++ b/pkg/webhook/admission/resource.go @@ -4,7 +4,7 @@ import ( "context" "strings" - log "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/webhook/admission/response_test.go b/pkg/webhook/admission/response_test.go index 66fc2058..a07c24b7 100644 --- a/pkg/webhook/admission/response_test.go +++ b/pkg/webhook/admission/response_test.go @@ -4,7 +4,6 @@ import "testing" func Test_AdmissionResponseFromFile_Allowed(t *testing.T) { r, err := ResponseFromFile("testdata/response/good_allow.json") - if err != nil { t.Fatalf("ValidatingResponse should be loaded from file: %v", err) } @@ -20,7 +19,6 @@ func 
Test_AdmissionResponseFromFile_Allowed(t *testing.T) { func Test_AdmissionResponseFromFile_AllowedWithWarnings(t *testing.T) { r, err := ResponseFromFile("testdata/response/good_allow_warnings.json") - if err != nil { t.Fatalf("ValidatingResponse should be loaded from file: %v", err) } @@ -40,7 +38,6 @@ func Test_AdmissionResponseFromFile_AllowedWithWarnings(t *testing.T) { func Test_AdmissionResponseFromFile_NotAllowed_WithMessage(t *testing.T) { r, err := ResponseFromFile("testdata/response/good_deny.json") - if err != nil { t.Fatalf("ValidatingResponse should be loaded from file: %v", err) } @@ -60,7 +57,6 @@ func Test_AdmissionResponseFromFile_NotAllowed_WithMessage(t *testing.T) { func Test_AdmissionResponseFromFile_NotAllowed_WithoutMessage(t *testing.T) { r, err := ResponseFromFile("testdata/response/good_deny_quiet.json") - if err != nil { t.Fatalf("ValidatingResponse should be loaded from file: %v", err) } diff --git a/pkg/webhook/conversion/handler.go b/pkg/webhook/conversion/handler.go index 5fd15adc..dcff8ae6 100644 --- a/pkg/webhook/conversion/handler.go +++ b/pkg/webhook/conversion/handler.go @@ -2,18 +2,20 @@ package conversion import ( "encoding/json" + "errors" "fmt" + "log/slog" "net/http" "strings" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" - log "github.com/sirupsen/logrus" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - structured_logger "github.com/flant/shell-operator/pkg/utils/structured-logger" + structuredLogger "github.com/flant/shell-operator/pkg/utils/structured-logger" ) type WebhookHandler struct { @@ -34,7 +36,7 @@ func NewWebhookHandler() *WebhookHandler { }) rtr.Group(func(r chi.Router) { - r.Use(structured_logger.NewStructuredLogger(log.StandardLogger(), "conversionWebhook")) + r.Use(structuredLogger.NewStructuredLogger(log.NewLogger(log.Options{}).Named("conversionWebhook"), "conversionWebhook")) r.Use(middleware.Recoverer) r.Use(middleware.AllowContentType("application/json")) r.Post("/*", h.serveReviewRequest) @@ -65,7 +67,7 @@ func (h *WebhookHandler) serveReviewRequest(w http.ResponseWriter, r *http.Reque conversionResponse, err := h.handleReviewRequest(crdName, convertReview.Request) if err != nil { - log.Error(err, "failed to convert", "request", convertReview.Request.UID) + log.Error("failed to convert", "request", convertReview.Request.UID, slog.String("error", err.Error())) convertReview.Response = errored(err) } else { convertReview.Response = conversionResponse @@ -97,7 +99,7 @@ func (h *WebhookHandler) handleReviewRequest(crdName string, request *v1.Convers } if conversionResponse.FailedMessage != "" { - return nil, fmt.Errorf(conversionResponse.FailedMessage) + return nil, errors.New(conversionResponse.FailedMessage) } if len(request.Objects) != len(conversionResponse.ConvertedObjects) { @@ -109,7 +111,8 @@ func (h *WebhookHandler) handleReviewRequest(crdName string, request *v1.Convers UID: request.UID, Result: metav1.Status{ Status: metav1.StatusSuccess, - }}, nil + }, + }, nil } // detectCrdName extracts crdName from the url path. 
diff --git a/pkg/webhook/conversion/manager.go b/pkg/webhook/conversion/manager.go index 1988ab0a..44f5287f 100644 --- a/pkg/webhook/conversion/manager.go +++ b/pkg/webhook/conversion/manager.go @@ -2,10 +2,11 @@ package conversion import ( "context" - log "github.com/sirupsen/logrus" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "os" + "github.com/deckhouse/deckhouse/pkg/log" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + klient "github.com/flant/kube-client/client" "github.com/flant/shell-operator/pkg/webhook/server" ) diff --git a/pkg/webhook/server/server.go b/pkg/webhook/server/server.go index b1f1b3b7..b9301d4f 100644 --- a/pkg/webhook/server/server.go +++ b/pkg/webhook/server/server.go @@ -9,8 +9,8 @@ import ( "os" "time" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/go-chi/chi/v5" - log "github.com/sirupsen/logrus" ) type WebhookServer struct { diff --git a/test/hook/context/generator.go b/test/hook/context/generator.go index ce4b56c1..a786237e 100644 --- a/test/hook/context/generator.go +++ b/test/hook/context/generator.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/sirupsen/logrus" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/kube-client/fake" "github.com/flant/shell-operator/pkg/hook" @@ -40,10 +40,12 @@ type BindingContextController struct { mu sync.Mutex started bool + + logger *log.Logger } -func NewBindingContextController(config string, version ...fake.ClusterVersion) *BindingContextController { - logrus.SetLevel(logrus.ErrorLevel) +func NewBindingContextController(config string, logger *log.Logger, version ...fake.ClusterVersion) *BindingContextController { + log.SetDefaultLevel(log.LevelError) k8sVersion := fake.ClusterVersionV119 if len(version) > 0 { @@ -57,13 +59,14 @@ func NewBindingContextController(config string, version ...fake.ClusterVersion) HookMap: make(map[string]string), HookConfig: config, fakeCluster: fc, + logger: logger, } - b.KubeEventsManager = kubeeventsmanager.NewKubeEventsManager(ctx, b.fakeCluster.Client) + b.KubeEventsManager = kubeeventsmanager.NewKubeEventsManager(ctx, b.fakeCluster.Client, b.logger.Named("kube-events-manager")) // Re-create factory to drop informers created using different b.fakeCluster.Client. 
kubeeventsmanager.DefaultFactoryStore = kubeeventsmanager.NewFactoryStore() - b.ScheduleManager = schedulemanager.NewScheduleManager(ctx) + b.ScheduleManager = schedulemanager.NewScheduleManager(ctx, b.logger.Named("schedule-manager")) b.Controller = NewStateController(fc, b.KubeEventsManager) @@ -99,7 +102,7 @@ func (b *BindingContextController) Run(initialState string) (GeneratedBindingCon } if b.Hook == nil { - testHook := hook.NewHook("test", "test") + testHook := hook.NewHook("test", "test", b.logger.Named("hook")) testHook, err = testHook.LoadConfig([]byte(b.HookConfig)) if err != nil { return GeneratedBindingContexts{}, fmt.Errorf("couldn't load or validate hook configuration: %v", err) @@ -108,7 +111,7 @@ func (b *BindingContextController) Run(initialState string) (GeneratedBindingCon } b.HookCtrl = controller.NewHookController() - b.HookCtrl.InitKubernetesBindings(b.Hook.GetConfig().OnKubernetesEvents, b.KubeEventsManager) + b.HookCtrl.InitKubernetesBindings(b.Hook.GetConfig().OnKubernetesEvents, b.KubeEventsManager, b.logger.Named("kubernetes-bindings")) b.HookCtrl.InitScheduleBindings(b.Hook.GetConfig().Schedules, b.ScheduleManager) b.HookCtrl.EnableScheduleBindings() diff --git a/test/hook/context/generator_test.go b/test/hook/context/generator_test.go index d384666c..09028938 100644 --- a/test/hook/context/generator_test.go +++ b/test/hook/context/generator_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "testing" + "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" . "github.com/flant/shell-operator/pkg/hook/binding_context" @@ -31,7 +32,7 @@ schedule: crontab: "* * * * *" includeSnapshotsFrom: - selected_pods -`) +`, log.NewNop()) defer c.Stop() // Synchronization contexts @@ -144,7 +145,7 @@ kubernetes: - selected_crds kind: MyResource name: selected_crds -`) +`, log.NewNop()) c.RegisterCRD("my.crd.io", "v1alpha1", "MyResource", true) defer c.Stop() @@ -190,7 +191,7 @@ kubernetes: - deployment kind: Deployment name: deployment -`) +`, log.NewNop()) contexts, err := c.Run(` --- apiVersion: apps/v1 @@ -241,7 +242,7 @@ kubernetes: kind: Pod name: pods-grouped group: group1 -`) +`, log.NewNop()) defer c.Stop() contexts, err := c.Run(` @@ -286,7 +287,7 @@ kubernetes: group: main kind: Secret name: secret -`) +`, log.NewNop()) defer c.Stop() contexts, err := c.Run(` @@ -328,7 +329,7 @@ kubernetes: kind: Pod name: selected_pods_nosync executeHookOnSynchronization: false -`) +`, log.NewNop()) defer c.Stop() // Synchronization contexts @@ -371,7 +372,7 @@ schedule: crontab: "* * * * *" includeSnapshotsFrom: - selected_pods -`) +`, log.NewNop()) defer c.Stop() // Synchronization contexts diff --git a/test/integration/kube_event_manager/kube_event_manager_test.go b/test/integration/kube_event_manager/kube_event_manager_test.go index 532ddd54..51d1ecdd 100644 --- a/test/integration/kube_event_manager/kube_event_manager_test.go +++ b/test/integration/kube_event_manager/kube_event_manager_test.go @@ -11,6 +11,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/kube_events_manager" . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" @@ -26,7 +27,7 @@ var _ = Describe("Binding 'kubernetes' with kind 'Pod' should emit KubeEvent obj var KubeEventsManager kube_events_manager.KubeEventsManager BeforeEach(func() { - KubeEventsManager = kube_events_manager.NewKubeEventsManager(context.Background(), KubeClient) + KubeEventsManager = kube_events_manager.NewKubeEventsManager(context.Background(), KubeClient, log.NewNop()) }) Context("with configVersion: v1", func() { @@ -67,7 +68,7 @@ var _ = Describe("Binding 'kubernetes' with kind 'Pod' should emit KubeEvent obj When("Pod is Added", func() { JustBeforeEach(func() { - app.SetupLogging(nil) + app.SetupLogging(nil, log.NewNop()) // Unlock KubeEvent emitting. m := KubeEventsManager.GetMonitor(monitorConfig.Metadata.MonitorId) diff --git a/test/integration/suite/run.go b/test/integration/suite/run.go index 316eec68..1144d209 100644 --- a/test/integration/suite/run.go +++ b/test/integration/suite/run.go @@ -10,6 +10,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/deckhouse/deckhouse/pkg/log" klient "github.com/flant/kube-client/client" "github.com/flant/shell-operator/pkg/kube/object_patch" ) @@ -36,5 +37,5 @@ var _ = BeforeSuite(func() { err := KubeClient.Init() Expect(err).ShouldNot(HaveOccurred()) - ObjectPatcher = object_patch.NewObjectPatcher(KubeClient) + ObjectPatcher = object_patch.NewObjectPatcher(KubeClient, log.NewNop()) }) diff --git a/test/utils/directory.go b/test/utils/directory.go index 47ba6cb3..b3eed52f 100644 --- a/test/utils/directory.go +++ b/test/utils/directory.go @@ -9,6 +9,8 @@ import ( utils_file "github.com/flant/shell-operator/pkg/utils/file" ) +// TODO: remove useless code + // ChooseExistedDirectoryPath returns first non-empty existed directory path from arguments. // // Each argument is prefixed with current directory if is not started with / diff --git a/test/utils/jqmatcher.go b/test/utils/jqmatcher.go index 5d6ab415..6e9f64ee 100644 --- a/test/utils/jqmatcher.go +++ b/test/utils/jqmatcher.go @@ -1,66 +1,68 @@ //go:build test // +build test +// TODO: remove useless code + package utils -import ( - "encoding/json" - "fmt" +// import ( +// "encoding/json" +// "fmt" - "github.com/onsi/gomega/matchers" - "github.com/onsi/gomega/types" +// "github.com/onsi/gomega/matchers" +// "github.com/onsi/gomega/types" - . "github.com/flant/libjq-go" -) +// . 
"github.com/flant/libjq-go" +// ) -// var _ = Ω(verBcs).To(MatchJq(`.[0] | has("objects")`, Equal(true))) -func MatchJq(jqExpression string, matcher interface{}) types.GomegaMatcher { - return &matchJq{ - JqExpr: jqExpression, - Matcher: matcher, - } -} +// // var _ = Ω(verBcs).To(MatchJq(`.[0] | has("objects")`, Equal(true))) +// func MatchJq(jqExpression string, matcher interface{}) types.GomegaMatcher { +// return &matchJq{ +// JqExpr: jqExpression, +// Matcher: matcher, +// } +// } -type matchJq struct { - JqExpr string - InputString string - Matcher interface{} - ResMatcher types.GomegaMatcher -} +// type matchJq struct { +// JqExpr string +// InputString string +// Matcher interface{} +// ResMatcher types.GomegaMatcher +// } -func (matcher *matchJq) Match(actual interface{}) (success bool, err error) { - switch v := actual.(type) { - case string: - matcher.InputString = v - case []byte: - matcher.InputString = string(v) - default: - inputBytes, err := json.Marshal(v) - if err != nil { - return false, fmt.Errorf("MatchJq marshal object to json: %s\n", err) - } - matcher.InputString = string(inputBytes) - } +// func (matcher *matchJq) Match(actual interface{}) (success bool, err error) { +// switch v := actual.(type) { +// case string: +// matcher.InputString = v +// case []byte: +// matcher.InputString = string(v) +// default: +// inputBytes, err := json.Marshal(v) +// if err != nil { +// return false, fmt.Errorf("MatchJq marshal object to json: %s\n", err) +// } +// matcher.InputString = string(inputBytes) +// } - //nolint:typecheck // Ignore false positive: undeclared name: `Jq`. - res, err := Jq().Program(matcher.JqExpr).Run(matcher.InputString) - if err != nil { - return false, fmt.Errorf("MatchJq apply jq expression: %s\n", err) - } +// //nolint:typecheck // Ignore false positive: undeclared name: `Jq`. 
+// res, err := Jq().Program(matcher.JqExpr).Run(matcher.InputString) +// if err != nil { +// return false, fmt.Errorf("MatchJq apply jq expression: %s\n", err) +// } - var isMatcher bool - matcher.ResMatcher, isMatcher = matcher.Matcher.(types.GomegaMatcher) - if !isMatcher { - matcher.ResMatcher = &matchers.EqualMatcher{Expected: res} - } +// var isMatcher bool +// matcher.ResMatcher, isMatcher = matcher.Matcher.(types.GomegaMatcher) +// if !isMatcher { +// matcher.ResMatcher = &matchers.EqualMatcher{Expected: res} +// } - return matcher.ResMatcher.Match(res) -} +// return matcher.ResMatcher.Match(res) +// } -func (matcher *matchJq) FailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Jq expression `%s` applied to '%s' not match: %v", matcher.JqExpr, matcher.InputString, matcher.ResMatcher.FailureMessage(actual)) -} +// func (matcher *matchJq) FailureMessage(actual interface{}) (message string) { +// return fmt.Sprintf("Jq expression `%s` applied to '%s' not match: %v", matcher.JqExpr, matcher.InputString, matcher.ResMatcher.FailureMessage(actual)) +// } -func (matcher *matchJq) NegatedFailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Jq expression `%s` applied to '%s' should not match: %s", matcher.JqExpr, matcher.InputString, matcher.ResMatcher.NegatedFailureMessage(actual)) -} +// func (matcher *matchJq) NegatedFailureMessage(actual interface{}) (message string) { +// return fmt.Sprintf("Jq expression `%s` applied to '%s' should not match: %s", matcher.JqExpr, matcher.InputString, matcher.ResMatcher.NegatedFailureMessage(actual)) +// } diff --git a/test/utils/jsonloganalyzer.go b/test/utils/jsonloganalyzer.go index d24a7222..a95a1ae1 100644 --- a/test/utils/jsonloganalyzer.go +++ b/test/utils/jsonloganalyzer.go @@ -28,12 +28,12 @@ func NewJsonLogAnalyzer() *JsonLogAnalyzer { } } -func (r *JsonLogAnalyzer) AddGroup(analyzers ...*JsonLogMatcher) { - r.Matchers = append(r.Matchers, analyzers) +func (a *JsonLogAnalyzer) AddGroup(analyzers ...*JsonLogMatcher) { + a.Matchers = append(a.Matchers, analyzers) } -func (r *JsonLogAnalyzer) OnStop(fn func()) { - r.OnStopFn = fn +func (a *JsonLogAnalyzer) OnStop(fn func()) { + a.OnStopFn = fn } func (a *JsonLogAnalyzer) Finished() bool { @@ -57,10 +57,10 @@ func (a *JsonLogAnalyzer) Reset() { } func (a *JsonLogAnalyzer) HandleLine(line string) { - //fmt.Printf("Got line: %s\n", line) - //defer func() { + // fmt.Printf("Got line: %s\n", line) + // defer func() { // fmt.Printf("analyzer.HandleLine done: ind=%d fin=%v err=%v\n", a.Index, a.finished, a.err) - //}() + // }() if a.finished { return } @@ -156,14 +156,14 @@ func NewJsonLogMatcher(steps ...*MatcherStep) *JsonLogMatcher { } } -func (m *JsonLogMatcher) HandleRecord(r JsonLogRecord) { +func (m *JsonLogMatcher) HandleRecord(_ JsonLogRecord) { } func (m *JsonLogMatcher) HandleLine(line string) { - //fmt.Printf("Matcher Got line: %s\n", line) - //defer func() { + // fmt.Printf("Matcher Got line: %s\n", line) + // defer func() { // fmt.Printf("matcher.HandleLine done: ind=%d fin=%v err=%v\n", m.Index, m.finished, m.err) - //}() + // }() defer func() { e := recover() if e != nil { @@ -259,7 +259,7 @@ func (matcher *FinishAllMatchersSuccessfullyMatcher) Match(actual interface{}) ( return matcher.finished && matcher.err == nil, nil } -func (matcher *FinishAllMatchersSuccessfullyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *FinishAllMatchersSuccessfullyMatcher) FailureMessage(_ interface{}) (message string) { msgs := 
[]string{} if !matcher.finished { msgs = append(msgs, "is finished") @@ -270,7 +270,7 @@ func (matcher *FinishAllMatchersSuccessfullyMatcher) FailureMessage(actual inter return fmt.Sprintf("Expected JsonLogAnalyzer %s", strings.Join(msgs, " and ")) } -func (matcher *FinishAllMatchersSuccessfullyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *FinishAllMatchersSuccessfullyMatcher) NegatedFailureMessage(_ interface{}) (message string) { msgs := []string{} if matcher.finished { msgs = append(msgs, "is not finished") diff --git a/test/utils/kubectl.go b/test/utils/kubectl.go index c63bb7a5..fa817b82 100644 --- a/test/utils/kubectl.go +++ b/test/utils/kubectl.go @@ -1,6 +1,8 @@ //go:build test // +build test +// TODO: remove useless code + package utils import ( diff --git a/test/utils/prometheus.go b/test/utils/prometheus.go index 926385d1..0294ffbc 100644 --- a/test/utils/prometheus.go +++ b/test/utils/prometheus.go @@ -127,10 +127,10 @@ func (p *PromScraper) Scrape() error { return nil } -func (p *PromScraper) HasMetric(name string) { +func (p *PromScraper) HasMetric(_ string) { } -func (p *PromScraper) MetricEquals(name string) { +func (p *PromScraper) MetricEquals(_ string) { } // FindExact searches metric by name and labels @@ -199,11 +199,11 @@ func (matcher *HaveMetricMatcher) Match(actual interface{}) (success bool, err e return !promScraper.FindExact(matcher.pmv).IsEmpty(), nil } -func (matcher *HaveMetricMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveMetricMatcher) FailureMessage(_ interface{}) (message string) { return "Expected ShellOperator has metric" } -func (matcher *HaveMetricMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveMetricMatcher) NegatedFailureMessage(_ interface{}) (message string) { return "Expected ShellOperator has no metric" } diff --git a/test/utils/shell-operator.go b/test/utils/shell-operator.go index 3cc2e977..cf1690bc 100644 --- a/test/utils/shell-operator.go +++ b/test/utils/shell-operator.go @@ -1,6 +1,8 @@ //go:build test // +build test +// TODO: remove useless code + package utils import ( @@ -107,10 +109,10 @@ func (matcher *BeShellOperatorStoppedMatcher) Match(actual interface{}) (success return matcher.err != nil && strings.Contains(matcher.err.Error(), "command is stopped"), nil } -func (matcher *BeShellOperatorStoppedMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeShellOperatorStoppedMatcher) FailureMessage(_ interface{}) (message string) { return fmt.Sprintf("Expected ShellOperator stopped. 
Got error: %v", matcher.err) } -func (matcher *BeShellOperatorStoppedMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeShellOperatorStoppedMatcher) NegatedFailureMessage(_ interface{}) (message string) { return "Expected ShellOperator stopped with error" } diff --git a/test/utils/streamedexec.go b/test/utils/streamedexec.go index f610aa82..3b073d7e 100644 --- a/test/utils/streamedexec.go +++ b/test/utils/streamedexec.go @@ -1,10 +1,13 @@ //go:build test // +build test +// TODO: remove useless code + package utils import ( "bufio" + "errors" "fmt" "io" "os" @@ -90,7 +93,7 @@ func StreamedExecCommand(cmd *exec.Cmd, opts CommandOptions) error { if exitCode := session.ExitCode(); exitCode != 0 { cmdErr := fmt.Errorf("command failed, exit code %d", exitCode) if stopMsg != "" { - cmdErr = fmt.Errorf(stopMsg) + cmdErr = errors.New(stopMsg) } return &CommandError{ CommandError: cmdErr, @@ -100,7 +103,7 @@ func StreamedExecCommand(cmd *exec.Cmd, opts CommandOptions) error { return nil } -func consumeLines(r io.Reader, session *gexec.Session, opts CommandOptions) { +func consumeLines(r io.Reader, _ *gexec.Session, opts CommandOptions) { scanner := bufio.NewScanner(r) for scanner.Scan() { opts.OutputLineHandler(scanner.Text())