diff --git a/cmd/shell-operator/main.go b/cmd/shell-operator/main.go index 16f8a727..2c0e7ae5 100644 --- a/cmd/shell-operator/main.go +++ b/cmd/shell-operator/main.go @@ -10,7 +10,7 @@ import ( "github.com/flant/kube-client/klogtolog" "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/debug" - "github.com/flant/shell-operator/pkg/jq" + "github.com/flant/shell-operator/pkg/filter/jq" shell_operator "github.com/flant/shell-operator/pkg/shell-operator" utils_signal "github.com/flant/shell-operator/pkg/utils/signal" ) @@ -33,7 +33,8 @@ func main() { // print version kpApp.Command("version", "Show version.").Action(func(_ *kingpin.ParseContext) error { fmt.Printf("%s %s\n", app.AppName, app.Version) - fmt.Println(jq.FilterInfo()) + fl := jq.NewFilter(app.JqLibraryPath) + fmt.Println(fl.FilterInfo()) return nil }) diff --git a/pkg/app/debug.go b/pkg/app/debug.go index 5d9d93fc..19860a08 100644 --- a/pkg/app/debug.go +++ b/pkg/app/debug.go @@ -10,7 +10,10 @@ var DebugUnixSocket = "/var/run/shell-operator/debug.socket" var DebugHttpServerAddr = "" -var DebugKeepTmpFiles = "no" +var ( + DebugKeepTmpFilesVar = "no" + DebugKeepTmpFiles = false +) var DebugKubernetesAPI = false @@ -27,8 +30,12 @@ func DefineDebugFlags(kpApp *kingpin.Application, cmd *kingpin.CmdClause) { cmd.Flag("debug-keep-tmp-files", "set to yes to disable cleanup of temporary files"). Envar("DEBUG_KEEP_TMP_FILES"). Hidden(). - Default(DebugKeepTmpFiles). - StringVar(&DebugKeepTmpFiles) + Default(DebugKeepTmpFilesVar).Action(func(_ *kingpin.ParseContext) error { + DebugKeepTmpFiles = DebugKeepTmpFilesVar == "yes" + + return nil + }). + StringVar(&DebugKeepTmpFilesVar) cmd.Flag("debug-kubernetes-api", "enable client-go debug messages"). Envar("DEBUG_KUBERNETES_API"). diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index 68cda40e..277948de 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -15,16 +15,9 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" - "github.com/flant/shell-operator/pkg/app" utils "github.com/flant/shell-operator/pkg/utils/labels" ) -type CmdUsage struct { - Sys time.Duration - User time.Duration - MaxRss int64 -} - func Run(cmd *exec.Cmd) error { // TODO context: hook name, hook phase, hook binding // TODO observability @@ -33,21 +26,86 @@ func Run(cmd *exec.Cmd) error { return cmd.Run() } -func RunAndLogLines(cmd *exec.Cmd, logLabels map[string]string, logger *log.Logger) (*CmdUsage, error) { - // TODO observability +type Executor struct { + cmd *exec.Cmd + logProxyHookJSON bool + proxyJsonKey string + logger *log.Logger +} + +func (e *Executor) WithLogProxyHookJSON(logProxyHookJSON bool) *Executor { + e.logProxyHookJSON = logProxyHookJSON + + return e +} + +func (e *Executor) WithLogProxyHookJSONKey(logProxyHookJSONKey string) *Executor { + if logProxyHookJSONKey == "" { + return e + } + + e.proxyJsonKey = logProxyHookJSONKey + + return e +} + +func (e *Executor) WithLogger(logger *log.Logger) *Executor { + e.logger = logger + + return e +} + +func (e *Executor) WithCMDStdout(w io.Writer) *Executor { + e.cmd.Stdout = w + + return e +} + +func (e *Executor) WithCMDStderr(w io.Writer) *Executor { + e.cmd.Stderr = w + + return e +} + +func NewExecutor(dir string, entrypoint string, args []string, envs []string) *Executor { + cmd := exec.Command(entrypoint, args...) + cmd.Env = append(cmd.Env, envs...) 
+ cmd.Dir = dir + + ex := &Executor{ + cmd: cmd, + proxyJsonKey: "proxyJsonLog", + logger: log.NewLogger(log.Options{}).Named("auto-executor"), + } + + return ex +} + +func (e *Executor) Output() ([]byte, error) { + e.logger.Debugf("Executing command '%s' in '%s' dir", strings.Join(e.cmd.Args, " "), e.cmd.Dir) + return e.cmd.Output() +} + +type CmdUsage struct { + Sys time.Duration + User time.Duration + MaxRss int64 +} + +func (e *Executor) RunAndLogLines(logLabels map[string]string) (*CmdUsage, error) { stdErr := bytes.NewBuffer(nil) - logEntry := utils.EnrichLoggerWithLabels(logger, logLabels) + logEntry := utils.EnrichLoggerWithLabels(e.logger, logLabels) stdoutLogEntry := logEntry.With("output", "stdout") stderrLogEntry := logEntry.With("output", "stderr") - logEntry.Debugf("Executing command '%s' in '%s' dir", strings.Join(cmd.Args, " "), cmd.Dir) + logEntry.Debugf("Executing command '%s' in '%s' dir", strings.Join(e.cmd.Args, " "), e.cmd.Dir) - plo := &proxyLogger{app.LogProxyHookJSON, stdoutLogEntry, make([]byte, 0)} - ple := &proxyLogger{app.LogProxyHookJSON, stderrLogEntry, make([]byte, 0)} - cmd.Stdout = plo - cmd.Stderr = io.MultiWriter(ple, stdErr) + plo := &proxyLogger{e.logProxyHookJSON, e.proxyJsonKey, stdoutLogEntry, make([]byte, 0)} + ple := &proxyLogger{e.logProxyHookJSON, e.proxyJsonKey, stderrLogEntry, make([]byte, 0)} + e.cmd.Stdout = plo + e.cmd.Stderr = io.MultiWriter(ple, stdErr) - err := cmd.Run() + err := e.cmd.Run() if err != nil { if len(stdErr.Bytes()) > 0 { return nil, fmt.Errorf("%s", stdErr.String()) @@ -57,14 +115,14 @@ func RunAndLogLines(cmd *exec.Cmd, logLabels map[string]string, logger *log.Logg } var usage *CmdUsage - if cmd.ProcessState != nil { + if e.cmd.ProcessState != nil { usage = &CmdUsage{ - Sys: cmd.ProcessState.SystemTime(), - User: cmd.ProcessState.UserTime(), + Sys: e.cmd.ProcessState.SystemTime(), + User: e.cmd.ProcessState.UserTime(), } // FIXME Maxrss is Unix specific. - sysUsage := cmd.ProcessState.SysUsage() + sysUsage := e.cmd.ProcessState.SysUsage() if v, ok := sysUsage.(*syscall.Rusage); ok { // v.Maxrss is int32 on arm/v7 usage.MaxRss = int64(v.Maxrss) //nolint:unconvert @@ -76,6 +134,7 @@ func RunAndLogLines(cmd *exec.Cmd, logLabels map[string]string, logger *log.Logg type proxyLogger struct { logProxyHookJSON bool + proxyJsonLogKey string logger *log.Logger @@ -116,7 +175,7 @@ func (pl *proxyLogger) Write(p []byte) (int, error) { return len(p), err } - logger := pl.logger.With(app.ProxyJsonLogKey, true) + logger := pl.logger.With(pl.proxyJsonLogKey, true) logLineRaw, _ := json.Marshal(logMap) logLine := string(logLineRaw) @@ -176,18 +235,3 @@ func (pl *proxyLogger) writerScanner(p []byte) { pl.logger.Error("reading from scanner", slog.String("error", err.Error())) } } - -func Output(cmd *exec.Cmd) (output []byte, err error) { - // TODO context: hook name, hook phase, hook binding - // TODO observability - log.Debugf("Executing command '%s' in '%s' dir", strings.Join(cmd.Args, " "), cmd.Dir) - output, err = cmd.Output() - return -} - -func MakeCommand(dir string, entrypoint string, args []string, envs []string) *exec.Cmd { - cmd := exec.Command(entrypoint, args...) - cmd.Env = append(cmd.Env, envs...) 
- cmd.Dir = dir - return cmd -} diff --git a/pkg/executor/executor_test.go b/pkg/executor/executor_test.go index 0a9d72d8..16c43c6e 100644 --- a/pkg/executor/executor_test.go +++ b/pkg/executor/executor_test.go @@ -5,7 +5,6 @@ import ( "io" "math/rand/v2" "os" - "os/exec" "regexp" "testing" "time" @@ -13,8 +12,6 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/flant/shell-operator/pkg/app" ) func TestRunAndLogLines(t *testing.T) { @@ -35,11 +32,11 @@ func TestRunAndLogLines(t *testing.T) { logger.SetOutput(&buf) t.Run("simple log", func(t *testing.T) { - app.LogProxyHookJSON = true - - cmd := exec.Command("echo", `{"foo": "baz"}`) + ex := NewExecutor("", "echo", []string{`{"foo": "baz"}`}, []string{}). + WithLogProxyHookJSON(true). + WithLogger(logger) - _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) + _, err := ex.RunAndLogLines(map[string]string{"a": "b"}) assert.NoError(t, err) assert.Equal(t, buf.String(), `{"level":"fatal","msg":"hook result","a":"b","hook":{"foo":"baz"},"output":"stdout","proxyJsonLog":true,"time":"2006-01-02T15:04:05Z"}`+"\n") @@ -48,10 +45,10 @@ func TestRunAndLogLines(t *testing.T) { }) t.Run("not json log", func(t *testing.T) { - app.LogProxyHookJSON = false - cmd := exec.Command("echo", `foobar`) + ex := NewExecutor("", "echo", []string{"foobar"}, []string{}). + WithLogger(logger) - _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) + _, err := ex.RunAndLogLines(map[string]string{"a": "b"}) assert.NoError(t, err) assert.Equal(t, buf.String(), `{"level":"info","msg":"foobar","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n") @@ -67,10 +64,11 @@ func TestRunAndLogLines(t *testing.T) { _, _ = io.WriteString(f, `{"foo": "`+randStringRunes(1024*1024)+`"}`) - app.LogProxyHookJSON = true - cmd := exec.Command("cat", f.Name()) + ex := NewExecutor("", "cat", []string{f.Name()}, []string{}). + WithLogProxyHookJSON(true). + WithLogger(logger) - _, err = RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) + _, err = ex.RunAndLogLines(map[string]string{"a": "b"}) assert.NoError(t, err) reg := regexp.MustCompile(`{"level":"fatal","msg":"hook result","a":"b","hook":{"truncated":".*:truncated"},"output":"stdout","proxyJsonLog":true,"time":"2006-01-02T15:04:05Z"`) @@ -87,10 +85,10 @@ func TestRunAndLogLines(t *testing.T) { _, _ = io.WriteString(f, `result `+randStringRunes(1024*1024)) - app.LogProxyHookJSON = false - cmd := exec.Command("cat", f.Name()) + ex := NewExecutor("", "cat", []string{f.Name()}, []string{}). + WithLogger(logger) - _, err = RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) + _, err = ex.RunAndLogLines(map[string]string{"a": "b"}) assert.NoError(t, err) reg := regexp.MustCompile(`{"level":"info","msg":"result .*:truncated","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"`) @@ -101,25 +99,32 @@ func TestRunAndLogLines(t *testing.T) { t.Run("invalid json structure", func(t *testing.T) { logger.SetLevel(log.LevelDebug) - app.LogProxyHookJSON = true - cmd := exec.Command("echo", `["a","b","c"]`) - _, err := RunAndLogLines(cmd, map[string]string{"a": "b"}, logger) + + ex := NewExecutor("", "echo", []string{`["a","b","c"]`}, []string{}). + WithLogProxyHookJSON(true). 
+ WithLogger(logger) + + _, err := ex.RunAndLogLines(map[string]string{"a": "b"}) assert.NoError(t, err) - assert.Equal(t, buf.String(), `{"level":"debug","msg":"Executing command 'echo [\"a\",\"b\",\"c\"]' in '' dir","source":"executor/executor.go:43","a":"b","time":"2006-01-02T15:04:05Z"}`+"\n"+ - `{"level":"debug","msg":"json log line not map[string]interface{}","source":"executor/executor.go:111","a":"b","line":["a","b","c"],"output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n"+ - `{"level":"info","msg":"[\"a\",\"b\",\"c\"]\n","source":"executor/executor.go:114","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n") + + assert.Equal(t, buf.String(), `{"level":"debug","msg":"Executing command 'echo [\"a\",\"b\",\"c\"]' in '' dir","source":"executor/executor.go:101","a":"b","time":"2006-01-02T15:04:05Z"}`+"\n"+ + `{"level":"debug","msg":"json log line not map[string]interface{}","source":"executor/executor.go:170","a":"b","line":["a","b","c"],"output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n"+ + `{"level":"info","msg":"[\"a\",\"b\",\"c\"]\n","source":"executor/executor.go:173","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n") buf.Reset() }) t.Run("multiline", func(t *testing.T) { logger.SetLevel(log.LevelInfo) - app.LogProxyHookJSON = true - cmd := exec.Command("echo", ` + arg := ` {"a":"b", "c":"d"} -`) - _, err := RunAndLogLines(cmd, map[string]string{"foor": "baar"}, logger) +` + ex := NewExecutor("", "echo", []string{arg}, []string{}). + WithLogProxyHookJSON(true). + WithLogger(logger) + + _, err := ex.RunAndLogLines(map[string]string{"foor": "baar"}) assert.NoError(t, err) assert.Equal(t, buf.String(), `{"level":"fatal","msg":"hook result","foor":"baar","hook":{"a":"b","c":"d"},"output":"stdout","proxyJsonLog":true,"time":"2006-01-02T15:04:05Z"}`+"\n") @@ -127,12 +132,14 @@ func TestRunAndLogLines(t *testing.T) { }) t.Run("multiline non json", func(t *testing.T) { - app.LogProxyHookJSON = false - cmd := exec.Command("echo", ` + arg := ` a b c d -`) - _, err := RunAndLogLines(cmd, map[string]string{"foor": "baar"}, logger) +` + ex := NewExecutor("", "echo", []string{arg}, []string{}). + WithLogger(logger) + + _, err := ex.RunAndLogLines(map[string]string{"foor": "baar"}) assert.NoError(t, err) assert.Equal(t, buf.String(), `{"level":"info","msg":"a b","foor":"baar","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n"+ `{"level":"info","msg":"c d","foor":"baar","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n") @@ -141,12 +148,15 @@ c d }) t.Run("multiline json", func(t *testing.T) { - app.LogProxyHookJSON = true - cmd := exec.Command("echo", `{ + arg := `{ "a":"b", "c":"d" -}`) - _, err := RunAndLogLines(cmd, map[string]string{"foor": "baar"}, logger) +}` + ex := NewExecutor("", "echo", []string{arg}, []string{}). + WithLogProxyHookJSON(true). 
+ WithLogger(logger) + + _, err := ex.RunAndLogLines(map[string]string{"foor": "baar"}) assert.NoError(t, err) assert.Equal(t, buf.String(), `{"level":"fatal","msg":"hook result","foor":"baar","hook":{"a":"b","c":"d"},"output":"stdout","proxyJsonLog":true,"time":"2006-01-02T15:04:05Z"}`+"\n") diff --git a/pkg/filter/filter.go b/pkg/filter/filter.go new file mode 100644 index 00000000..c24d29d6 --- /dev/null +++ b/pkg/filter/filter.go @@ -0,0 +1,6 @@ +package filter + +type Filter interface { + ApplyFilter(filterStr string, data []byte) (string, error) + FilterInfo() string +} diff --git a/pkg/filter/jq/apply_jq_exec.go b/pkg/filter/jq/apply_jq_exec.go new file mode 100644 index 00000000..6543cf31 --- /dev/null +++ b/pkg/filter/jq/apply_jq_exec.go @@ -0,0 +1,29 @@ +//go:build !cgo || (cgo && !use_libjq) +// +build !cgo cgo,!use_libjq + +package jq + +import "github.com/flant/shell-operator/pkg/filter" + +var _ filter.Filter = (*Filter)(nil) + +func NewFilter(libpath string) *Filter { + return &Filter{ + Libpath: libpath, + } +} + +type Filter struct { + Libpath string +} + +// ApplyJqFilter runs jq expression provided in jqFilter with jsonData as input. +// +// It uses jq as a subprocess. +func (f *Filter) ApplyFilter(jqFilter string, jsonData []byte) (string, error) { + return jqExec(jqFilter, jsonData, f.Libpath) +} + +func (f *Filter) FilterInfo() string { + return "jqFilter implementation: use jq binary from $PATH" +} diff --git a/pkg/jq/apply_libjq-go.go b/pkg/filter/jq/apply_libjq_go.go similarity index 57% rename from pkg/jq/apply_libjq-go.go rename to pkg/filter/jq/apply_libjq_go.go index 633d103d..e4b3a7d6 100644 --- a/pkg/jq/apply_libjq-go.go +++ b/pkg/filter/jq/apply_libjq_go.go @@ -7,28 +7,41 @@ import ( "fmt" "os" - . "github.com/flant/libjq-go" + libjq "github.com/flant/libjq-go" + "github.com/flant/shell-operator/pkg/filter" ) +var _ filter.Filter = (*Filter)(nil) + +func NewFilter(libpath string) *Filter { + return &Filter{ + Libpath: libpath, + } +} + +type Filter struct { + Libpath string +} + // Note: add build tag 'use_libjg' to build with libjq-go. // ApplyJqFilter runs jq expression provided in jqFilter with jsonData as input. // // It uses libjq-go or executes jq as a binary if $JQ_EXEC is set to "yes". -func ApplyJqFilter(jqFilter string, jsonData []byte, libPath string) (string, error) { +func (f *Filter) ApplyFilter(jqFilter string, jsonData []byte) (string, error) { // Use jq exec filtering if environment variable is present. 
if os.Getenv("JQ_EXEC") == "yes" { - return jqExec(jqFilter, jsonData, libPath) + return jqExec(jqFilter, jsonData, f.Libpath) } - result, err := Jq().WithLibPath(libPath).Program(jqFilter).Cached().Run(string(jsonData)) + result, err := libjq.Jq().WithLibPath(f.Libpath).Program(jqFilter).Cached().Run(string(jsonData)) if err != nil { return "", fmt.Errorf("libjq filter '%s': '%s'", jqFilter, err) } return result, nil } -func FilterInfo() string { +func (f *Filter) FilterInfo() string { if os.Getenv("JQ_EXEC") == "yes" { return "jqFilter implementation: use jq binary from $PATH (JQ_EXEC=yes is set)" } diff --git a/pkg/jq/jq_exec.go b/pkg/filter/jq/jq_exec.go similarity index 100% rename from pkg/jq/jq_exec.go rename to pkg/filter/jq/jq_exec.go diff --git a/pkg/hook/binding_context/binding_context.go b/pkg/hook/binding_context/binding_context.go index 126b5b99..968bbecf 100644 --- a/pkg/hook/binding_context/binding_context.go +++ b/pkg/hook/binding_context/binding_context.go @@ -1,4 +1,4 @@ -package binding_context +package bindingcontext import ( "encoding/json" @@ -7,15 +7,15 @@ import ( v1 "k8s.io/api/admission/v1" apixv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - . "github.com/flant/shell-operator/pkg/hook/types" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" + htypes "github.com/flant/shell-operator/pkg/hook/types" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" ) // BindingContext contains information about event for hook type BindingContext struct { Metadata struct { Version string - BindingType BindingType + BindingType htypes.BindingType JqFilter string IncludeSnapshots []string IncludeAllSnapshots bool @@ -25,10 +25,10 @@ type BindingContext struct { // name of a binding or a group or kubeEventType if binding has no 'name' field Binding string // additional fields for 'kubernetes' binding - Type KubeEventType - WatchEvent WatchEventType - Objects []ObjectAndFilterResult - Snapshots map[string][]ObjectAndFilterResult + Type kemtypes.KubeEventType + WatchEvent kemtypes.WatchEventType + Objects []kemtypes.ObjectAndFilterResult + Snapshots map[string][]kemtypes.ObjectAndFilterResult AdmissionReview *v1.AdmissionReview ConversionReview *apixv1.ConversionReview FromVersion string @@ -36,7 +36,7 @@ type BindingContext struct { } func (bc BindingContext) IsSynchronization() bool { - return bc.Metadata.BindingType == OnKubernetesEvent && bc.Type == TypeSynchronization + return bc.Metadata.BindingType == htypes.OnKubernetesEvent && bc.Type == kemtypes.TypeSynchronization } func (bc BindingContext) MarshalJSON() ([]byte, error) { @@ -59,7 +59,7 @@ func (bc BindingContext) MapV1() map[string]interface{} { res := make(map[string]interface{}) res["binding"] = bc.Binding - if bc.Metadata.BindingType == OnStartup { + if bc.Metadata.BindingType == htypes.OnStartup { return res } @@ -73,19 +73,19 @@ func (bc BindingContext) MapV1() map[string]interface{} { } // Handle admission and conversion before grouping. 
- if bc.Metadata.BindingType == KubernetesValidating { + if bc.Metadata.BindingType == htypes.KubernetesValidating { res["type"] = "Validating" res["review"] = bc.AdmissionReview return res } - if bc.Metadata.BindingType == KubernetesMutating { + if bc.Metadata.BindingType == htypes.KubernetesMutating { res["type"] = "Mutating" res["review"] = bc.AdmissionReview return res } - if bc.Metadata.BindingType == KubernetesConversion { + if bc.Metadata.BindingType == htypes.KubernetesConversion { res["type"] = "Conversion" res["fromVersion"] = bc.FromVersion res["toVersion"] = bc.ToVersion @@ -100,13 +100,13 @@ func (bc BindingContext) MapV1() map[string]interface{} { return res } - if bc.Metadata.BindingType == Schedule { + if bc.Metadata.BindingType == htypes.Schedule { res["type"] = "Schedule" return res } // A short way for addon-operator's hooks. - if bc.Metadata.BindingType != OnKubernetesEvent || bc.Type == "" { + if bc.Metadata.BindingType != htypes.OnKubernetesEvent || bc.Type == "" { return res } @@ -117,13 +117,13 @@ func (bc BindingContext) MapV1() map[string]interface{} { res["watchEvent"] = string(bc.WatchEvent) } switch bc.Type { - case TypeSynchronization: + case kemtypes.TypeSynchronization: if len(bc.Objects) == 0 { res["objects"] = make([]string, 0) } else { res["objects"] = bc.Objects } - case TypeEvent: + case kemtypes.TypeEvent: if len(bc.Objects) == 0 { res["object"] = nil if bc.Metadata.JqFilter != "" { @@ -145,17 +145,17 @@ func (bc BindingContext) MapV1() map[string]interface{} { func (bc BindingContext) MapV0() map[string]interface{} { res := make(map[string]interface{}) res["binding"] = bc.Binding - if bc.Metadata.BindingType != OnKubernetesEvent { + if bc.Metadata.BindingType != htypes.OnKubernetesEvent { return res } eventV0 := "" switch bc.WatchEvent { - case WatchEventAdded: + case kemtypes.WatchEventAdded: eventV0 = "add" - case WatchEventModified: + case kemtypes.WatchEventModified: eventV0 = "update" - case WatchEventDeleted: + case kemtypes.WatchEventDeleted: eventV0 = "delete" } diff --git a/pkg/hook/binding_context/binding_context_test.go b/pkg/hook/binding_context/binding_context_test.go index efba5588..e04b9e23 100644 --- a/pkg/hook/binding_context/binding_context_test.go +++ b/pkg/hook/binding_context/binding_context_test.go @@ -1,4 +1,4 @@ -package binding_context +package bindingcontext // TODO: need refactoring // change JQ tests for another testing tool diff --git a/pkg/hook/config/config.go b/pkg/hook/config/config.go index f42bac2d..a76c1710 100644 --- a/pkg/hook/config/config.go +++ b/pkg/hook/config/config.go @@ -5,10 +5,10 @@ import ( "sigs.k8s.io/yaml" - . 
"github.com/flant/shell-operator/pkg/hook/types" + htypes "github.com/flant/shell-operator/pkg/hook/types" ) -var validBindingTypes = []BindingType{OnStartup, Schedule, OnKubernetesEvent, KubernetesValidating, KubernetesMutating, KubernetesConversion} +var validBindingTypes = []htypes.BindingType{htypes.OnStartup, htypes.Schedule, htypes.OnKubernetesEvent, htypes.KubernetesValidating, htypes.KubernetesMutating, htypes.KubernetesConversion} // HookConfig is a structure with versioned hook configuration type HookConfig struct { @@ -20,13 +20,13 @@ type HookConfig struct { V1 *HookConfigV1 // effective config values - OnStartup *OnStartupConfig - Schedules []ScheduleConfig - OnKubernetesEvents []OnKubernetesEventConfig - KubernetesValidating []ValidatingConfig - KubernetesMutating []MutatingConfig - KubernetesConversion []ConversionConfig - Settings *Settings + OnStartup *htypes.OnStartupConfig + Schedules []htypes.ScheduleConfig + OnKubernetesEvents []htypes.OnKubernetesEventConfig + KubernetesValidating []htypes.ValidatingConfig + KubernetesMutating []htypes.MutatingConfig + KubernetesConversion []htypes.ConversionConfig + Settings *htypes.Settings } // LoadAndValidate loads config from bytes and validate it. Returns multierror. @@ -92,8 +92,8 @@ func (c *HookConfig) ConvertAndCheck(data []byte) error { } // Bindings returns a list of binding types in hook configuration. -func (c *HookConfig) Bindings() []BindingType { - res := []BindingType{} +func (c *HookConfig) Bindings() []htypes.BindingType { + res := []htypes.BindingType{} for _, binding := range validBindingTypes { if c.HasBinding(binding) { @@ -105,33 +105,33 @@ func (c *HookConfig) Bindings() []BindingType { } // HasBinding returns true if a hook configuration has binding type. -func (c *HookConfig) HasBinding(binding BindingType) bool { +func (c *HookConfig) HasBinding(binding htypes.BindingType) bool { switch binding { - case OnStartup: + case htypes.OnStartup: return c.OnStartup != nil - case Schedule: + case htypes.Schedule: return len(c.Schedules) > 0 - case OnKubernetesEvent: + case htypes.OnKubernetesEvent: return len(c.OnKubernetesEvents) > 0 - case KubernetesValidating: + case htypes.KubernetesValidating: return len(c.KubernetesValidating) > 0 - case KubernetesMutating: + case htypes.KubernetesMutating: return len(c.KubernetesMutating) > 0 - case KubernetesConversion: + case htypes.KubernetesConversion: return len(c.KubernetesConversion) > 0 } return false } -func (c *HookConfig) ConvertOnStartup(value interface{}) (*OnStartupConfig, error) { +func (c *HookConfig) ConvertOnStartup(value interface{}) (*htypes.OnStartupConfig, error) { floatValue, err := ConvertFloatForBinding(value, "onStartup") if err != nil || floatValue == nil { return nil, err } - res := &OnStartupConfig{} + res := &htypes.OnStartupConfig{} res.AllowFailure = false - res.BindingName = string(OnStartup) + res.BindingName = string(htypes.OnStartup) res.Order = *floatValue return res, nil } @@ -142,7 +142,7 @@ func (c *HookConfig) ConvertOnStartup(value interface{}) (*OnStartupConfig, erro // - binding name should exists, // // - binding name should not be repeated. 
-func CheckIncludeSnapshots(kubeConfigs []OnKubernetesEventConfig, includes ...string) error { +func CheckIncludeSnapshots(kubeConfigs []htypes.OnKubernetesEventConfig, includes ...string) error { for _, include := range includes { bindings := 0 for _, kubeCfg := range kubeConfigs { diff --git a/pkg/hook/config/config_v0.go b/pkg/hook/config/config_v0.go index e0c463fc..4340019f 100644 --- a/pkg/hook/config/config_v0.go +++ b/pkg/hook/config/config_v0.go @@ -6,10 +6,10 @@ import ( "gopkg.in/robfig/cron.v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - . "github.com/flant/shell-operator/pkg/hook/types" - "github.com/flant/shell-operator/pkg/kube_events_manager" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" - . "github.com/flant/shell-operator/pkg/schedule_manager/types" + htypes "github.com/flant/shell-operator/pkg/hook/types" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" + smtypes "github.com/flant/shell-operator/pkg/schedule_manager/types" ) type HookConfigV0 struct { @@ -49,7 +49,7 @@ func (cv0 *HookConfigV0) ConvertAndCheck(c *HookConfig) (err error) { return err } - c.Schedules = []ScheduleConfig{} + c.Schedules = []htypes.ScheduleConfig{} for i, rawSchedule := range cv0.Schedule { err := cv0.CheckSchedule(rawSchedule) if err != nil { @@ -62,30 +62,30 @@ func (cv0 *HookConfigV0) ConvertAndCheck(c *HookConfig) (err error) { c.Schedules = append(c.Schedules, schedule) } - c.OnKubernetesEvents = []OnKubernetesEventConfig{} + c.OnKubernetesEvents = []htypes.OnKubernetesEventConfig{} for i, kubeCfg := range cv0.OnKubernetesEvent { err := cv0.CheckOnKubernetesEvent(kubeCfg, fmt.Sprintf("onKubernetesEvent[%d]", i)) if err != nil { return fmt.Errorf("invalid onKubernetesEvent config [%d]: %v", i, err) } - monitor := &kube_events_manager.MonitorConfig{} + monitor := &kubeeventsmanager.MonitorConfig{} monitor.Metadata.DebugName = MonitorDebugName(kubeCfg.Name, i) monitor.Metadata.MonitorId = MonitorConfigID() monitor.Metadata.LogLabels = map[string]string{} monitor.Metadata.MetricLabels = map[string]string{} - monitor.WithMode(ModeV0) + monitor.WithMode(kemtypes.ModeV0) // convert event names from legacy config. 
- eventTypes := []WatchEventType{} + eventTypes := []kemtypes.WatchEventType{} for _, eventName := range kubeCfg.EventTypes { switch eventName { case "add": - eventTypes = append(eventTypes, WatchEventAdded) + eventTypes = append(eventTypes, kemtypes.WatchEventAdded) case "update": - eventTypes = append(eventTypes, WatchEventModified) + eventTypes = append(eventTypes, kemtypes.WatchEventModified) case "delete": - eventTypes = append(eventTypes, WatchEventDeleted) + eventTypes = append(eventTypes, kemtypes.WatchEventDeleted) default: return fmt.Errorf("event '%s' is unsupported", eventName) } @@ -94,13 +94,13 @@ func (cv0 *HookConfigV0) ConvertAndCheck(c *HookConfig) (err error) { monitor.Kind = kubeCfg.Kind if kubeCfg.ObjectName != "" { - monitor.WithNameSelector(&NameSelector{ + monitor.WithNameSelector(&kemtypes.NameSelector{ MatchNames: []string{kubeCfg.ObjectName}, }) } if kubeCfg.NamespaceSelector != nil && !kubeCfg.NamespaceSelector.Any { - monitor.WithNamespaceSelector(&NamespaceSelector{ - NameSelector: &NameSelector{ + monitor.WithNamespaceSelector(&kemtypes.NamespaceSelector{ + NameSelector: &kemtypes.NameSelector{ MatchNames: kubeCfg.NamespaceSelector.MatchNames, }, }) @@ -108,7 +108,7 @@ func (cv0 *HookConfigV0) ConvertAndCheck(c *HookConfig) (err error) { monitor.WithLabelSelector(kubeCfg.Selector) monitor.JqFilter = kubeCfg.JqFilter - kubeConfig := OnKubernetesEventConfig{} + kubeConfig := htypes.OnKubernetesEventConfig{} kubeConfig.Monitor = monitor kubeConfig.AllowFailure = kubeCfg.AllowFailure if kubeCfg.Name == "" { @@ -132,17 +132,17 @@ func (cv0 *HookConfigV0) CheckSchedule(schV0 ScheduleConfigV0) error { return nil } -func (cv0 *HookConfigV0) ConvertSchedule(schV0 ScheduleConfigV0) (ScheduleConfig, error) { - res := ScheduleConfig{} +func (cv0 *HookConfigV0) ConvertSchedule(schV0 ScheduleConfigV0) (htypes.ScheduleConfig, error) { + res := htypes.ScheduleConfig{} if schV0.Name != "" { res.BindingName = schV0.Name } else { - res.BindingName = string(Schedule) + res.BindingName = string(htypes.Schedule) } res.AllowFailure = schV0.AllowFailure - res.ScheduleEntry = ScheduleEntry{ + res.ScheduleEntry = smtypes.ScheduleEntry{ Crontab: schV0.Crontab, Id: ScheduleID(), } diff --git a/pkg/hook/config/config_v1.go b/pkg/hook/config/config_v1.go index 16307950..68995f56 100644 --- a/pkg/hook/config/config_v1.go +++ b/pkg/hook/config/config_v1.go @@ -12,10 +12,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "github.com/flant/shell-operator/pkg/app" - . "github.com/flant/shell-operator/pkg/hook/types" - "github.com/flant/shell-operator/pkg/kube_events_manager" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" - . 
"github.com/flant/shell-operator/pkg/schedule_manager/types" + htypes "github.com/flant/shell-operator/pkg/hook/types" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" + smtypes "github.com/flant/shell-operator/pkg/schedule_manager/types" "github.com/flant/shell-operator/pkg/webhook/admission" "github.com/flant/shell-operator/pkg/webhook/conversion" "github.com/flant/shell-operator/pkg/webhook/validating/validation" @@ -44,32 +44,32 @@ type ScheduleConfigV1 struct { // version 1 of kubernetes event configuration type OnKubernetesEventConfigV1 struct { - Name string `json:"name,omitempty"` - WatchEventTypes []WatchEventType `json:"watchEvent,omitempty"` - ExecuteHookOnEvents []WatchEventType `json:"executeHookOnEvent,omitempty"` - ExecuteHookOnSynchronization string `json:"executeHookOnSynchronization,omitempty"` - WaitForSynchronization string `json:"waitForSynchronization,omitempty"` - KeepFullObjectsInMemory string `json:"keepFullObjectsInMemory,omitempty"` - Mode KubeEventMode `json:"mode,omitempty"` - ApiVersion string `json:"apiVersion,omitempty"` - Kind string `json:"kind,omitempty"` - NameSelector *KubeNameSelectorV1 `json:"nameSelector,omitempty"` - LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` - FieldSelector *KubeFieldSelectorV1 `json:"fieldSelector,omitempty"` - Namespace *KubeNamespaceSelectorV1 `json:"namespace,omitempty"` - JqFilter string `json:"jqFilter,omitempty"` - AllowFailure bool `json:"allowFailure,omitempty"` - ResynchronizationPeriod string `json:"resynchronizationPeriod,omitempty"` - IncludeSnapshotsFrom []string `json:"includeSnapshotsFrom,omitempty"` - Queue string `json:"queue,omitempty"` - Group string `json:"group,omitempty"` + Name string `json:"name,omitempty"` + WatchEventTypes []kemtypes.WatchEventType `json:"watchEvent,omitempty"` + ExecuteHookOnEvents []kemtypes.WatchEventType `json:"executeHookOnEvent,omitempty"` + ExecuteHookOnSynchronization string `json:"executeHookOnSynchronization,omitempty"` + WaitForSynchronization string `json:"waitForSynchronization,omitempty"` + KeepFullObjectsInMemory string `json:"keepFullObjectsInMemory,omitempty"` + Mode kemtypes.KubeEventMode `json:"mode,omitempty"` + ApiVersion string `json:"apiVersion,omitempty"` + Kind string `json:"kind,omitempty"` + NameSelector *KubeNameSelectorV1 `json:"nameSelector,omitempty"` + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + FieldSelector *KubeFieldSelectorV1 `json:"fieldSelector,omitempty"` + Namespace *KubeNamespaceSelectorV1 `json:"namespace,omitempty"` + JqFilter string `json:"jqFilter,omitempty"` + AllowFailure bool `json:"allowFailure,omitempty"` + ResynchronizationPeriod string `json:"resynchronizationPeriod,omitempty"` + IncludeSnapshotsFrom []string `json:"includeSnapshotsFrom,omitempty"` + Queue string `json:"queue,omitempty"` + Group string `json:"group,omitempty"` } -type KubeNameSelectorV1 NameSelector +type KubeNameSelectorV1 kemtypes.NameSelector -type KubeFieldSelectorV1 FieldSelector +type KubeFieldSelectorV1 kemtypes.FieldSelector -type KubeNamespaceSelectorV1 NamespaceSelector +type KubeNamespaceSelectorV1 kemtypes.NamespaceSelector // version 1 of kubernetes vali configuration type KubernetesAdmissionConfigV1 struct { @@ -111,14 +111,14 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { return err } - c.OnKubernetesEvents = []OnKubernetesEventConfig{} + c.OnKubernetesEvents = 
[]htypes.OnKubernetesEventConfig{} for i, kubeCfg := range cv1.OnKubernetesEvent { err := cv1.CheckOnKubernetesEvent(kubeCfg, fmt.Sprintf("kubernetes[%d]", i)) if err != nil { return fmt.Errorf("invalid kubernetes config [%d]: %v", i, err) } - monitor := &kube_events_manager.MonitorConfig{} + monitor := &kubeeventsmanager.MonitorConfig{} monitor.Metadata.DebugName = MonitorDebugName(kubeCfg.Name, i) monitor.Metadata.MonitorId = MonitorConfigID() monitor.Metadata.LogLabels = map[string]string{} @@ -126,9 +126,9 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { monitor.WithMode(kubeCfg.Mode) monitor.ApiVersion = kubeCfg.ApiVersion monitor.Kind = kubeCfg.Kind - monitor.WithNameSelector((*NameSelector)(kubeCfg.NameSelector)) - monitor.WithFieldSelector((*FieldSelector)(kubeCfg.FieldSelector)) - monitor.WithNamespaceSelector((*NamespaceSelector)(kubeCfg.Namespace)) + monitor.WithNameSelector((*kemtypes.NameSelector)(kubeCfg.NameSelector)) + monitor.WithFieldSelector((*kemtypes.FieldSelector)(kubeCfg.FieldSelector)) + monitor.WithNamespaceSelector((*kemtypes.NamespaceSelector)(kubeCfg.Namespace)) monitor.WithLabelSelector(kubeCfg.LabelSelector) monitor.JqFilter = kubeCfg.JqFilter // executeHookOnEvent is a priority @@ -142,11 +142,11 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { } } - kubeConfig := OnKubernetesEventConfig{} + kubeConfig := htypes.OnKubernetesEventConfig{} kubeConfig.Monitor = monitor kubeConfig.AllowFailure = kubeCfg.AllowFailure if kubeCfg.Name == "" { - kubeConfig.BindingName = string(OnKubernetesEvent) + kubeConfig.BindingName = string(htypes.OnKubernetesEvent) } else { kubeConfig.BindingName = kubeCfg.Name } @@ -192,7 +192,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { // schedule bindings with includeSnapshotsFrom // are depend on kubernetes bindings. - c.Schedules = []ScheduleConfig{} + c.Schedules = []htypes.ScheduleConfig{} for i, rawSchedule := range cv1.Schedule { err := cv1.CheckSchedule(c.OnKubernetesEvents, rawSchedule) if err != nil { @@ -206,7 +206,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { } // Validating webhooks - c.KubernetesValidating = []ValidatingConfig{} + c.KubernetesValidating = []htypes.ValidatingConfig{} for i, rawValidating := range c.V1.KubernetesValidating { err := cv1.CheckAdmission(c.OnKubernetesEvents, rawValidating) if err != nil { @@ -230,7 +230,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { return err } - c.KubernetesMutating = []MutatingConfig{} + c.KubernetesMutating = []htypes.MutatingConfig{} for i, rawMutating := range c.V1.KubernetesMutating { err := cv1.CheckAdmission(c.OnKubernetesEvents, rawMutating) if err != nil { @@ -245,7 +245,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { // TODO: Validate mutatingWebhooks // Conversion webhooks. 
- c.KubernetesConversion = []ConversionConfig{} + c.KubernetesConversion = []htypes.ConversionConfig{} for i, rawConversion := range c.V1.KubernetesConversion { err := cv1.CheckConversion(c.OnKubernetesEvents, rawConversion) if err != nil { @@ -270,7 +270,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { } groupSnapshots[kubeCfg.Group] = append(groupSnapshots[kubeCfg.Group], kubeCfg.BindingName) } - newKubeEvents := make([]OnKubernetesEventConfig, 0) + newKubeEvents := make([]htypes.OnKubernetesEventConfig, 0) for _, cfg := range c.OnKubernetesEvents { if snapshots, ok := groupSnapshots[cfg.Group]; ok { cfg.IncludeSnapshotsFrom = MergeArrays(cfg.IncludeSnapshotsFrom, snapshots) @@ -279,7 +279,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { } c.OnKubernetesEvents = newKubeEvents - newSchedules := make([]ScheduleConfig, 0) + newSchedules := make([]htypes.ScheduleConfig, 0) for _, cfg := range c.Schedules { if snapshots, ok := groupSnapshots[cfg.Group]; ok { cfg.IncludeSnapshotsFrom = MergeArrays(cfg.IncludeSnapshotsFrom, snapshots) @@ -288,7 +288,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { } c.Schedules = newSchedules - newValidating := make([]ValidatingConfig, 0) + newValidating := make([]htypes.ValidatingConfig, 0) for _, cfg := range c.KubernetesValidating { if snapshots, ok := groupSnapshots[cfg.Group]; ok { cfg.IncludeSnapshotsFrom = MergeArrays(cfg.IncludeSnapshotsFrom, snapshots) @@ -297,7 +297,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { } c.KubernetesValidating = newValidating - newMutating := make([]MutatingConfig, 0) + newMutating := make([]htypes.MutatingConfig, 0) for _, cfg := range c.KubernetesMutating { if snapshots, ok := groupSnapshots[cfg.Group]; ok { cfg.IncludeSnapshotsFrom = MergeArrays(cfg.IncludeSnapshotsFrom, snapshots) @@ -306,7 +306,7 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { } c.KubernetesMutating = newMutating - newConversion := make([]ConversionConfig, 0) + newConversion := make([]htypes.ConversionConfig, 0) for _, cfg := range c.KubernetesConversion { if snapshots, ok := groupSnapshots[cfg.Group]; ok { cfg.IncludeSnapshotsFrom = MergeArrays(cfg.IncludeSnapshotsFrom, snapshots) @@ -318,17 +318,17 @@ func (cv1 *HookConfigV1) ConvertAndCheck(c *HookConfig) (err error) { return nil } -func (cv1 *HookConfigV1) ConvertSchedule(schV1 ScheduleConfigV1) (ScheduleConfig, error) { - res := ScheduleConfig{} +func (cv1 *HookConfigV1) ConvertSchedule(schV1 ScheduleConfigV1) (htypes.ScheduleConfig, error) { + res := htypes.ScheduleConfig{} if schV1.Name != "" { res.BindingName = schV1.Name } else { - res.BindingName = string(Schedule) + res.BindingName = string(htypes.Schedule) } res.AllowFailure = schV1.AllowFailure - res.ScheduleEntry = ScheduleEntry{ + res.ScheduleEntry = smtypes.ScheduleEntry{ Crontab: schV1.Crontab, Id: ScheduleID(), } @@ -344,7 +344,7 @@ func (cv1 *HookConfigV1) ConvertSchedule(schV1 ScheduleConfigV1) (ScheduleConfig return res, nil } -func (cv1 *HookConfigV1) CheckSchedule(kubeConfigs []OnKubernetesEventConfig, schV1 ScheduleConfigV1) (allErr error) { +func (cv1 *HookConfigV1) CheckSchedule(kubeConfigs []htypes.OnKubernetesEventConfig, schV1 ScheduleConfigV1) (allErr error) { var err error _, err = cron.Parse(schV1.Crontab) if err != nil { @@ -370,14 +370,14 @@ func (cv1 *HookConfigV1) CheckOnKubernetesEvent(kubeCfg OnKubernetesEventConfigV } if kubeCfg.LabelSelector != nil { - _, err := 
kube_events_manager.FormatLabelSelector(kubeCfg.LabelSelector) + _, err := kubeeventsmanager.FormatLabelSelector(kubeCfg.LabelSelector) if err != nil { allErr = multierror.Append(allErr, fmt.Errorf("labelSelector is invalid: %v", err)) } } if kubeCfg.FieldSelector != nil { - _, err := kube_events_manager.FormatFieldSelector((*FieldSelector)(kubeCfg.FieldSelector)) + _, err := kubeeventsmanager.FormatFieldSelector((*kemtypes.FieldSelector)(kubeCfg.FieldSelector)) if err != nil { allErr = multierror.Append(allErr, fmt.Errorf("fieldSelector is invalid: %v", err)) } @@ -396,7 +396,7 @@ func (cv1 *HookConfigV1) CheckOnKubernetesEvent(kubeCfg OnKubernetesEventConfigV return allErr } -func (cv1 *HookConfigV1) CheckAdmission(kubeConfigs []OnKubernetesEventConfig, cfgV1 KubernetesAdmissionConfigV1) (allErr error) { +func (cv1 *HookConfigV1) CheckAdmission(kubeConfigs []htypes.OnKubernetesEventConfig, cfgV1 KubernetesAdmissionConfigV1) (allErr error) { var err error if len(cfgV1.IncludeSnapshotsFrom) > 0 { @@ -407,14 +407,14 @@ func (cv1 *HookConfigV1) CheckAdmission(kubeConfigs []OnKubernetesEventConfig, c } if cfgV1.LabelSelector != nil { - _, err := kube_events_manager.FormatLabelSelector(cfgV1.LabelSelector) + _, err := kubeeventsmanager.FormatLabelSelector(cfgV1.LabelSelector) if err != nil { allErr = multierror.Append(allErr, fmt.Errorf("labelSelector is invalid: %v", err)) } } if cfgV1.Namespace != nil && cfgV1.Namespace.LabelSelector != nil { - _, err := kube_events_manager.FormatLabelSelector(cfgV1.Namespace.LabelSelector) + _, err := kubeeventsmanager.FormatLabelSelector(cfgV1.Namespace.LabelSelector) if err != nil { allErr = multierror.Append(allErr, fmt.Errorf("namespace.labelSelector is invalid: %v", err)) } @@ -423,8 +423,8 @@ func (cv1 *HookConfigV1) CheckAdmission(kubeConfigs []OnKubernetesEventConfig, c return allErr } -func convertValidating(cfgV1 KubernetesAdmissionConfigV1) (ValidatingConfig, error) { - cfg := ValidatingConfig{} +func convertValidating(cfgV1 KubernetesAdmissionConfigV1) (htypes.ValidatingConfig, error) { + cfg := htypes.ValidatingConfig{} cfg.Group = cfgV1.Group cfg.IncludeSnapshotsFrom = cfgV1.IncludeSnapshotsFrom @@ -469,8 +469,8 @@ func convertValidating(cfgV1 KubernetesAdmissionConfigV1) (ValidatingConfig, err return cfg, nil } -func convertMutating(cfgV1 KubernetesAdmissionConfigV1) (MutatingConfig, error) { - cfg := MutatingConfig{} +func convertMutating(cfgV1 KubernetesAdmissionConfigV1) (htypes.MutatingConfig, error) { + cfg := htypes.MutatingConfig{} cfg.Group = cfgV1.Group cfg.IncludeSnapshotsFrom = cfgV1.IncludeSnapshotsFrom @@ -515,7 +515,7 @@ func convertMutating(cfgV1 KubernetesAdmissionConfigV1) (MutatingConfig, error) return cfg, nil } -func (cv1 *HookConfigV1) CheckConversion(kubeConfigs []OnKubernetesEventConfig, cfgV1 KubernetesConversionConfigV1) (allErr error) { +func (cv1 *HookConfigV1) CheckConversion(kubeConfigs []htypes.OnKubernetesEventConfig, cfgV1 KubernetesConversionConfigV1) (allErr error) { var err error if len(cfgV1.IncludeSnapshotsFrom) > 0 { @@ -528,8 +528,8 @@ func (cv1 *HookConfigV1) CheckConversion(kubeConfigs []OnKubernetesEventConfig, return allErr } -func (cv1 *HookConfigV1) ConvertConversion(cfgV1 KubernetesConversionConfigV1) (ConversionConfig, error) { - cfg := ConversionConfig{} +func (cv1 *HookConfigV1) ConvertConversion(cfgV1 KubernetesConversionConfigV1) (htypes.ConversionConfig, error) { + cfg := htypes.ConversionConfig{} cfg.Group = cfgV1.Group cfg.IncludeSnapshotsFrom = cfgV1.IncludeSnapshotsFrom @@ -547,7 +547,7 
@@ func (cv1 *HookConfigV1) ConvertConversion(cfgV1 KubernetesConversionConfigV1) ( } // CheckAndConvertSettings validates a duration and returns a Settings struct. -func (cv1 *HookConfigV1) CheckAndConvertSettings(settings *SettingsV1) (out *Settings, allErr error) { +func (cv1 *HookConfigV1) CheckAndConvertSettings(settings *SettingsV1) (out *htypes.Settings, allErr error) { if settings == nil { return nil, nil } @@ -565,7 +565,7 @@ func (cv1 *HookConfigV1) CheckAndConvertSettings(settings *SettingsV1) (out *Set return nil, allErr } - return &Settings{ + return &htypes.Settings{ ExecutionMinInterval: interval, ExecutionBurst: int(burst), }, nil diff --git a/pkg/hook/controller/admission_bindings_controller.go b/pkg/hook/controller/admission_bindings_controller.go index cbeeec32..97597b77 100644 --- a/pkg/hook/controller/admission_bindings_controller.go +++ b/pkg/hook/controller/admission_bindings_controller.go @@ -4,14 +4,14 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/api/admission/v1" - . "github.com/flant/shell-operator/pkg/hook/binding_context" - . "github.com/flant/shell-operator/pkg/hook/types" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" + htypes "github.com/flant/shell-operator/pkg/hook/types" "github.com/flant/shell-operator/pkg/webhook/admission" ) // AdmissionBindingToWebhookLink is a link between a hook and a webhook configuration. type AdmissionBindingToWebhookLink struct { - BindingType BindingType + BindingType htypes.BindingType BindingName string ConfigurationId string WebhookId string @@ -26,8 +26,8 @@ type AdmissionBindingsController struct { // WebhookId -> link AdmissionLinks map[string]*AdmissionBindingToWebhookLink - ValidatingBindings []ValidatingConfig - MutatingBindings []MutatingConfig + ValidatingBindings []htypes.ValidatingConfig + MutatingBindings []htypes.MutatingConfig webhookManager *admission.WebhookManager } @@ -39,11 +39,11 @@ var NewValidatingBindingsController = func() *AdmissionBindingsController { } } -func (c *AdmissionBindingsController) WithValidatingBindings(bindings []ValidatingConfig) { +func (c *AdmissionBindingsController) WithValidatingBindings(bindings []htypes.ValidatingConfig) { c.ValidatingBindings = bindings } -func (c *AdmissionBindingsController) WithMutatingBindings(bindings []MutatingConfig) { +func (c *AdmissionBindingsController) WithMutatingBindings(bindings []htypes.MutatingConfig) { c.MutatingBindings = bindings } @@ -74,7 +74,7 @@ func (c *AdmissionBindingsController) EnableValidatingBindings() { for _, config := range c.ValidatingBindings { c.AdmissionLinks[config.Webhook.Metadata.WebhookId] = &AdmissionBindingToWebhookLink{ - BindingType: KubernetesValidating, + BindingType: htypes.KubernetesValidating, BindingName: config.BindingName, ConfigurationId: c.ConfigurationId, WebhookId: config.Webhook.Metadata.WebhookId, @@ -108,7 +108,7 @@ func (c *AdmissionBindingsController) EnableMutatingBindings() { for _, config := range c.MutatingBindings { c.AdmissionLinks[config.Webhook.Metadata.WebhookId] = &AdmissionBindingToWebhookLink{ - BindingType: KubernetesMutating, + BindingType: htypes.KubernetesMutating, BindingName: config.BindingName, ConfigurationId: c.ConfigurationId, WebhookId: config.Webhook.Metadata.WebhookId, @@ -139,7 +139,7 @@ func (c *AdmissionBindingsController) HandleEvent(event admission.Event) Binding if c.ConfigurationId != event.ConfigurationId { log.Errorf("Possible bug!!! 
Unknown validating event: no binding for configurationId '%s' (webhookId '%s')", event.ConfigurationId, event.WebhookId) return BindingExecutionInfo{ - BindingContext: []BindingContext{}, + BindingContext: []bctx.BindingContext{}, AllowFailure: false, } } @@ -148,12 +148,12 @@ func (c *AdmissionBindingsController) HandleEvent(event admission.Event) Binding if !hasKey { log.Errorf("Possible bug!!! Unknown validating event: no binding for configurationId '%s', webhookId '%s'", event.ConfigurationId, event.WebhookId) return BindingExecutionInfo{ - BindingContext: []BindingContext{}, + BindingContext: []bctx.BindingContext{}, AllowFailure: false, } } - bc := BindingContext{ + bc := bctx.BindingContext{ Binding: link.BindingName, AdmissionReview: &v1.AdmissionReview{Request: event.Request}, } @@ -162,7 +162,7 @@ func (c *AdmissionBindingsController) HandleEvent(event admission.Event) Binding bc.Metadata.Group = link.Group return BindingExecutionInfo{ - BindingContext: []BindingContext{bc}, + BindingContext: []bctx.BindingContext{bc}, Binding: link.BindingName, IncludeSnapshots: link.IncludeSnapshots, Group: link.Group, diff --git a/pkg/hook/controller/conversion_bindings_controller.go b/pkg/hook/controller/conversion_bindings_controller.go index 4548e01e..ca4c632f 100644 --- a/pkg/hook/controller/conversion_bindings_controller.go +++ b/pkg/hook/controller/conversion_bindings_controller.go @@ -4,8 +4,8 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - . "github.com/flant/shell-operator/pkg/hook/binding_context" - . "github.com/flant/shell-operator/pkg/hook/types" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" + htypes "github.com/flant/shell-operator/pkg/hook/types" "github.com/flant/shell-operator/pkg/webhook/conversion" ) @@ -25,7 +25,7 @@ type ConversionBindingsController struct { // crdName -> conversionRule id -> link Links map[string]map[conversion.Rule]*ConversionBindingToWebhookLink - Bindings []ConversionConfig + Bindings []htypes.ConversionConfig webhookManager *conversion.WebhookManager } @@ -37,7 +37,7 @@ var NewConversionBindingsController = func() *ConversionBindingsController { } } -func (c *ConversionBindingsController) WithBindings(bindings []ConversionConfig) { +func (c *ConversionBindingsController) WithBindings(bindings []htypes.ConversionConfig) { c.Bindings = bindings } @@ -83,7 +83,7 @@ func (c *ConversionBindingsController) HandleEvent(crdName string, request *v1.C if !hasKey { log.Errorf("Possible bug!!! No binding for conversion event for crd/%s", crdName) return BindingExecutionInfo{ - BindingContext: []BindingContext{}, + BindingContext: []bctx.BindingContext{}, AllowFailure: false, } } @@ -91,23 +91,23 @@ func (c *ConversionBindingsController) HandleEvent(crdName string, request *v1.C if !has { log.Errorf("Possible bug!!! 
Event has an unknown conversion rule %s for crd/%s: no binding was registered", rule.String(), crdName)
 		return BindingExecutionInfo{
-			BindingContext: []BindingContext{},
+			BindingContext: []bctx.BindingContext{},
 			AllowFailure:   false,
 		}
 	}
 
-	bc := BindingContext{
+	bc := bctx.BindingContext{
 		Binding:          link.BindingName,
 		ConversionReview: &v1.ConversionReview{Request: request},
 		FromVersion:      link.FromVersion,
 		ToVersion:        link.ToVersion,
 	}
-	bc.Metadata.BindingType = KubernetesConversion
+	bc.Metadata.BindingType = htypes.KubernetesConversion
 	bc.Metadata.IncludeSnapshots = link.IncludeSnapshots
 	bc.Metadata.Group = link.Group
 
 	return BindingExecutionInfo{
-		BindingContext:   []BindingContext{bc},
+		BindingContext:   []bctx.BindingContext{bc},
 		Binding:          link.BindingName,
 		IncludeSnapshots: link.IncludeSnapshots,
 		Group:            link.Group,
diff --git a/pkg/hook/controller/hook_controller.go b/pkg/hook/controller/hook_controller.go
index 54cd166d..b332ccbe 100644
--- a/pkg/hook/controller/hook_controller.go
+++ b/pkg/hook/controller/hook_controller.go
@@ -4,24 +4,24 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 
-	. "github.com/flant/shell-operator/pkg/hook/binding_context"
-	. "github.com/flant/shell-operator/pkg/hook/types"
-	"github.com/flant/shell-operator/pkg/kube_events_manager"
-	. "github.com/flant/shell-operator/pkg/kube_events_manager/types"
-	"github.com/flant/shell-operator/pkg/schedule_manager"
+	bctx "github.com/flant/shell-operator/pkg/hook/binding_context"
+	htypes "github.com/flant/shell-operator/pkg/hook/types"
+	kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager"
+	kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types"
+	schedulemanager "github.com/flant/shell-operator/pkg/schedule_manager"
 	"github.com/flant/shell-operator/pkg/webhook/admission"
 	"github.com/flant/shell-operator/pkg/webhook/conversion"
 )
 
 type BindingExecutionInfo struct {
-	BindingContext      []BindingContext
+	BindingContext      []bctx.BindingContext
 	IncludeSnapshots    []string
 	IncludeAllSnapshots bool
 	AllowFailure        bool
 	QueueName           string
 	Binding             string
 	Group               string
-	KubernetesBinding   OnKubernetesEventConfig
+	KubernetesBinding   htypes.OnKubernetesEventConfig
 }
 
 // This object will need to be put into every hook.
@@ -44,16 +44,16 @@ type HookController struct { ScheduleController ScheduleBindingsController AdmissionController *AdmissionBindingsController ConversionController *ConversionBindingsController - kubernetesBindings []OnKubernetesEventConfig - scheduleBindings []ScheduleConfig - validatingBindings []ValidatingConfig - mutatingBindings []MutatingConfig - conversionBindings []ConversionConfig + kubernetesBindings []htypes.OnKubernetesEventConfig + scheduleBindings []htypes.ScheduleConfig + validatingBindings []htypes.ValidatingConfig + mutatingBindings []htypes.MutatingConfig + conversionBindings []htypes.ConversionConfig logger *log.Logger } -func (hc *HookController) InitKubernetesBindings(bindings []OnKubernetesEventConfig, kubeEventMgr kube_events_manager.KubeEventsManager, logger *log.Logger) { +func (hc *HookController) InitKubernetesBindings(bindings []htypes.OnKubernetesEventConfig, kubeEventMgr kubeeventsmanager.KubeEventsManager, logger *log.Logger) { if len(bindings) == 0 { return } @@ -66,7 +66,7 @@ func (hc *HookController) InitKubernetesBindings(bindings []OnKubernetesEventCon hc.logger = logger } -func (hc *HookController) InitScheduleBindings(bindings []ScheduleConfig, scheduleMgr schedule_manager.ScheduleManager) { +func (hc *HookController) InitScheduleBindings(bindings []htypes.ScheduleConfig, scheduleMgr schedulemanager.ScheduleManager) { if len(bindings) == 0 { return } @@ -78,7 +78,7 @@ func (hc *HookController) InitScheduleBindings(bindings []ScheduleConfig, schedu hc.scheduleBindings = bindings } -func (hc *HookController) InitAdmissionBindings(vbindings []ValidatingConfig, mbindings []MutatingConfig, webhookMgr *admission.WebhookManager) { +func (hc *HookController) InitAdmissionBindings(vbindings []htypes.ValidatingConfig, mbindings []htypes.MutatingConfig, webhookMgr *admission.WebhookManager) { bindingCtrl := NewValidatingBindingsController() bindingCtrl.WithWebhookManager(webhookMgr) hc.AdmissionController = bindingCtrl @@ -87,7 +87,7 @@ func (hc *HookController) InitAdmissionBindings(vbindings []ValidatingConfig, mb hc.initMutatingBindings(mbindings) } -func (hc *HookController) initValidatingBindings(bindings []ValidatingConfig) { +func (hc *HookController) initValidatingBindings(bindings []htypes.ValidatingConfig) { if len(bindings) == 0 { return } @@ -96,7 +96,7 @@ func (hc *HookController) initValidatingBindings(bindings []ValidatingConfig) { hc.validatingBindings = bindings } -func (hc *HookController) initMutatingBindings(bindings []MutatingConfig) { +func (hc *HookController) initMutatingBindings(bindings []htypes.MutatingConfig) { if len(bindings) == 0 { return } @@ -105,7 +105,7 @@ func (hc *HookController) initMutatingBindings(bindings []MutatingConfig) { hc.mutatingBindings = bindings } -func (hc *HookController) InitConversionBindings(bindings []ConversionConfig, webhookMgr *conversion.WebhookManager) { +func (hc *HookController) InitConversionBindings(bindings []htypes.ConversionConfig, webhookMgr *conversion.WebhookManager) { if len(bindings) == 0 { return } @@ -117,7 +117,7 @@ func (hc *HookController) InitConversionBindings(bindings []ConversionConfig, we hc.conversionBindings = bindings } -func (hc *HookController) CanHandleKubeEvent(kubeEvent KubeEvent) bool { +func (hc *HookController) CanHandleKubeEvent(kubeEvent kemtypes.KubeEvent) bool { if hc.KubernetesController != nil { return hc.KubernetesController.CanHandleEvent(kubeEvent) } @@ -161,7 +161,7 @@ func (hc *HookController) HandleEnableKubernetesBindings(createTasksFn func(Bind return nil 
} -func (hc *HookController) HandleKubeEvent(event KubeEvent, createTasksFn func(BindingExecutionInfo)) { +func (hc *HookController) HandleKubeEvent(event kemtypes.KubeEvent, createTasksFn func(BindingExecutionInfo)) { if hc.KubernetesController != nil { execInfo := hc.KubernetesController.HandleEvent(event) if createTasksFn != nil { @@ -255,47 +255,47 @@ func (hc *HookController) EnableConversionBindings() { // KubernetesSnapshots returns a 'full snapshot': all snapshots for all registered kubernetes bindings. // Note: no caching as in UpdateSnapshots because KubernetesSnapshots used for non-combined binding contexts. -func (hc *HookController) KubernetesSnapshots() map[string][]ObjectAndFilterResult { +func (hc *HookController) KubernetesSnapshots() map[string][]kemtypes.ObjectAndFilterResult { if hc.KubernetesController != nil { return hc.KubernetesController.Snapshots() } - return map[string][]ObjectAndFilterResult{} + return map[string][]kemtypes.ObjectAndFilterResult{} } // getIncludeSnapshotsFrom returns binding names from 'includeSnapshotsFrom' field. -func (hc *HookController) getIncludeSnapshotsFrom(bindingType BindingType, bindingName string) []string { +func (hc *HookController) getIncludeSnapshotsFrom(bindingType htypes.BindingType, bindingName string) []string { includeSnapshotsFrom := make([]string, 0) switch bindingType { - case OnKubernetesEvent: + case htypes.OnKubernetesEvent: for _, binding := range hc.kubernetesBindings { if bindingName == binding.BindingName { includeSnapshotsFrom = binding.IncludeSnapshotsFrom break } } - case Schedule: + case htypes.Schedule: for _, binding := range hc.scheduleBindings { if bindingName == binding.BindingName { includeSnapshotsFrom = binding.IncludeSnapshotsFrom break } } - case KubernetesValidating: + case htypes.KubernetesValidating: for _, binding := range hc.validatingBindings { if bindingName == binding.BindingName { includeSnapshotsFrom = binding.IncludeSnapshotsFrom break } } - case KubernetesMutating: + case htypes.KubernetesMutating: for _, binding := range hc.mutatingBindings { if bindingName == binding.BindingName { includeSnapshotsFrom = binding.IncludeSnapshotsFrom break } } - case KubernetesConversion: + case htypes.KubernetesConversion: for _, binding := range hc.conversionBindings { if bindingName == binding.BindingName { includeSnapshotsFrom = binding.IncludeSnapshotsFrom @@ -314,25 +314,25 @@ func (hc *HookController) getIncludeSnapshotsFrom(bindingType BindingType, bindi // Combined "Synchronization" binging contexts or "Synchronization" // with self-inclusion may require several calls to Snapshot*() methods, but objects // may change between these calls. -func (hc *HookController) UpdateSnapshots(context []BindingContext) []BindingContext { +func (hc *HookController) UpdateSnapshots(context []bctx.BindingContext) []bctx.BindingContext { if hc.KubernetesController == nil { return context } // Cache retrieved snapshots to make them consistent. - cache := make(map[string][]ObjectAndFilterResult) + cache := make(map[string][]kemtypes.ObjectAndFilterResult) - newContext := make([]BindingContext, 0) + newContext := make([]bctx.BindingContext, 0) for _, bc := range context { newBc := bc // Update 'snapshots' field to fresh snapshot based on 'includeSnapshotsFrom' field. // Note: it is a cache-enabled version of KubernetesController.SnapshotsFrom. 
- newBc.Snapshots = make(map[string][]ObjectAndFilterResult) + newBc.Snapshots = make(map[string][]kemtypes.ObjectAndFilterResult) includeSnapshotsFrom := hc.getIncludeSnapshotsFrom(bc.Metadata.BindingType, bc.Binding) for _, bindingName := range includeSnapshotsFrom { // Initialize all keys with empty arrays. - newBc.Snapshots[bindingName] = make([]ObjectAndFilterResult, 0) + newBc.Snapshots[bindingName] = make([]kemtypes.ObjectAndFilterResult, 0) if _, has := cache[bindingName]; !has { cache[bindingName] = hc.KubernetesController.SnapshotsFor(bindingName) } @@ -342,7 +342,7 @@ func (hc *HookController) UpdateSnapshots(context []BindingContext) []BindingCon } // Also refresh 'objects' field for Kubernetes.Synchronization event. - if newBc.Metadata.BindingType == OnKubernetesEvent && newBc.Type == TypeSynchronization { + if newBc.Metadata.BindingType == htypes.OnKubernetesEvent && newBc.Type == kemtypes.TypeSynchronization { if _, has := cache[bc.Binding]; !has { cache[bc.Binding] = hc.KubernetesController.SnapshotsFor(bc.Binding) } diff --git a/pkg/hook/controller/hook_controller_test.go b/pkg/hook/controller/hook_controller_test.go index 92683f9f..6db4ca7e 100644 --- a/pkg/hook/controller/hook_controller_test.go +++ b/pkg/hook/controller/hook_controller_test.go @@ -8,11 +8,11 @@ import ( . "github.com/onsi/gomega" "github.com/flant/kube-client/fake" - "github.com/flant/shell-operator/pkg/hook/binding_context" + bindingcontext "github.com/flant/shell-operator/pkg/hook/binding_context" "github.com/flant/shell-operator/pkg/hook/config" "github.com/flant/shell-operator/pkg/hook/types" - "github.com/flant/shell-operator/pkg/kube_events_manager" - types2 "github.com/flant/shell-operator/pkg/kube_events_manager/types" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" ) // Test updating snapshots for combined contexts. @@ -20,7 +20,7 @@ func Test_UpdateSnapshots(t *testing.T) { g := NewWithT(t) fc := fake.NewFakeCluster(fake.ClusterVersionV121) - mgr := kube_events_manager.NewKubeEventsManager(context.Background(), fc.Client, log.NewNop()) + mgr := kubeeventsmanager.NewKubeEventsManager(context.Background(), fc.Client, log.NewNop()) testHookConfig := ` configVersion: v1 @@ -51,16 +51,16 @@ kubernetes: hc.EnableScheduleBindings() // Test case: combined binding context for binding_2 and binding_3. - bcs := []binding_context.BindingContext{ + bcs := []bindingcontext.BindingContext{ { Binding: "binding_2", - Type: types2.TypeEvent, - WatchEvent: types2.WatchEventAdded, + Type: kemtypes.TypeEvent, + WatchEvent: kemtypes.WatchEventAdded, }, { Binding: "binding_3", - Type: types2.TypeEvent, - WatchEvent: types2.WatchEventAdded, + Type: kemtypes.TypeEvent, + WatchEvent: kemtypes.WatchEventAdded, }, } bcs[0].Metadata.BindingType = types.OnKubernetesEvent diff --git a/pkg/hook/controller/kubernetes_bindings_controller.go b/pkg/hook/controller/kubernetes_bindings_controller.go index dd395d31..b03d0242 100644 --- a/pkg/hook/controller/kubernetes_bindings_controller.go +++ b/pkg/hook/controller/kubernetes_bindings_controller.go @@ -5,35 +5,35 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" - . "github.com/flant/shell-operator/pkg/hook/binding_context" - . "github.com/flant/shell-operator/pkg/hook/types" - "github.com/flant/shell-operator/pkg/kube_events_manager" - . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" + htypes "github.com/flant/shell-operator/pkg/hook/types" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" utils "github.com/flant/shell-operator/pkg/utils/labels" ) // KubernetesBindingToMonitorLink is a link between a binding config and a Monitor. type KubernetesBindingToMonitorLink struct { MonitorId string - BindingConfig OnKubernetesEventConfig + BindingConfig htypes.OnKubernetesEventConfig } // KubernetesBindingsController handles kubernetes bindings for one hook. type KubernetesBindingsController interface { - WithKubernetesBindings([]OnKubernetesEventConfig) - WithKubeEventsManager(kube_events_manager.KubeEventsManager) + WithKubernetesBindings([]htypes.OnKubernetesEventConfig) + WithKubeEventsManager(kubeeventsmanager.KubeEventsManager) EnableKubernetesBindings() ([]BindingExecutionInfo, error) UpdateMonitor(monitorId string, kind, apiVersion string) error UnlockEvents() UnlockEventsFor(monitorID string) StopMonitors() - CanHandleEvent(kubeEvent KubeEvent) bool - HandleEvent(kubeEvent KubeEvent) BindingExecutionInfo + CanHandleEvent(kubeEvent kemtypes.KubeEvent) bool + HandleEvent(kubeEvent kemtypes.KubeEvent) BindingExecutionInfo BindingNames() []string - SnapshotsFrom(bindingNames ...string) map[string][]ObjectAndFilterResult - SnapshotsFor(bindingName string) []ObjectAndFilterResult - Snapshots() map[string][]ObjectAndFilterResult + SnapshotsFrom(bindingNames ...string) map[string][]kemtypes.ObjectAndFilterResult + SnapshotsFor(bindingName string) []kemtypes.ObjectAndFilterResult + Snapshots() map[string][]kemtypes.ObjectAndFilterResult SnapshotsInfo() []string SnapshotsDump() map[string]interface{} } @@ -44,10 +44,10 @@ type kubernetesBindingsController struct { BindingMonitorLinks map[string]*KubernetesBindingToMonitorLink // bindings configurations - KubernetesBindings []OnKubernetesEventConfig + KubernetesBindings []htypes.OnKubernetesEventConfig // dependencies - kubeEventsManager kube_events_manager.KubeEventsManager + kubeEventsManager kubeeventsmanager.KubeEventsManager logger *log.Logger } @@ -63,11 +63,11 @@ var NewKubernetesBindingsController = func(logger *log.Logger) *kubernetesBindin } } -func (c *kubernetesBindingsController) WithKubernetesBindings(bindings []OnKubernetesEventConfig) { +func (c *kubernetesBindingsController) WithKubernetesBindings(bindings []htypes.OnKubernetesEventConfig) { c.KubernetesBindings = bindings } -func (c *kubernetesBindingsController) WithKubeEventsManager(kubeEventsManager kube_events_manager.KubeEventsManager) { +func (c *kubernetesBindingsController) WithKubeEventsManager(kubeEventsManager kubeeventsmanager.KubeEventsManager) { c.kubeEventsManager = kubeEventsManager } @@ -89,9 +89,9 @@ func (c *kubernetesBindingsController) EnableKubernetesBindings() ([]BindingExec // Start monitor's informers to fill the cache. c.kubeEventsManager.StartMonitor(config.Monitor.Metadata.MonitorId) - synchronizationInfo := c.HandleEvent(KubeEvent{ + synchronizationInfo := c.HandleEvent(kemtypes.KubeEvent{ MonitorId: config.Monitor.Metadata.MonitorId, - Type: TypeSynchronization, + Type: kemtypes.TypeSynchronization, }) res = append(res, synchronizationInfo) } @@ -132,10 +132,10 @@ func (c *kubernetesBindingsController) UpdateMonitor(monitorId string, kind, api // Synchronization has no meaning for UpdateMonitor. 
Just emit Added event to handle objects of // a new kind. - kubeEvent := KubeEvent{ + kubeEvent := kemtypes.KubeEvent{ MonitorId: monitorId, - Type: TypeEvent, - WatchEvents: []WatchEventType{WatchEventAdded}, + Type: kemtypes.TypeEvent, + WatchEvents: []kemtypes.WatchEventType{kemtypes.WatchEventAdded}, Objects: nil, } c.kubeEventsManager.Ch() <- kubeEvent @@ -173,7 +173,7 @@ func (c *kubernetesBindingsController) StopMonitors() { } } -func (c *kubernetesBindingsController) CanHandleEvent(kubeEvent KubeEvent) bool { +func (c *kubernetesBindingsController) CanHandleEvent(kubeEvent kemtypes.KubeEvent) bool { for key := range c.BindingMonitorLinks { if key == kubeEvent.MonitorId { return true @@ -184,12 +184,12 @@ func (c *kubernetesBindingsController) CanHandleEvent(kubeEvent KubeEvent) bool // HandleEvent receives event from KubeEventManager and returns a BindingExecutionInfo // to help create a new task to run a hook. -func (c *kubernetesBindingsController) HandleEvent(kubeEvent KubeEvent) BindingExecutionInfo { +func (c *kubernetesBindingsController) HandleEvent(kubeEvent kemtypes.KubeEvent) BindingExecutionInfo { link, hasKey := c.BindingMonitorLinks[kubeEvent.MonitorId] if !hasKey { log.Errorf("Possible bug!!! Unknown kube event: no such monitor id '%s' registered", kubeEvent.MonitorId) return BindingExecutionInfo{ - BindingContext: []BindingContext{}, + BindingContext: []bctx.BindingContext{}, AllowFailure: false, } } @@ -218,7 +218,7 @@ func (c *kubernetesBindingsController) BindingNames() []string { // SnapshotsFor returns snapshot for single onKubernetes binding. // It finds a monitorId for a binding name and returns an array of objects. -func (c *kubernetesBindingsController) SnapshotsFor(bindingName string) []ObjectAndFilterResult { +func (c *kubernetesBindingsController) SnapshotsFor(bindingName string) []kemtypes.ObjectAndFilterResult { for _, binding := range c.KubernetesBindings { if bindingName == binding.BindingName { monitorID := binding.Monitor.Metadata.MonitorId @@ -234,12 +234,12 @@ func (c *kubernetesBindingsController) SnapshotsFor(bindingName string) []Object // SnapshotsFrom returns snapshot for several binding names. // It finds a monitorId for each binding name and get its Snapshot, // then returns a map of object arrays for each binding name. -func (c *kubernetesBindingsController) SnapshotsFrom(bindingNames ...string) map[string][]ObjectAndFilterResult { - res := map[string][]ObjectAndFilterResult{} +func (c *kubernetesBindingsController) SnapshotsFrom(bindingNames ...string) map[string][]kemtypes.ObjectAndFilterResult { + res := map[string][]kemtypes.ObjectAndFilterResult{} for _, bindingName := range bindingNames { // Initialize all keys with empty arrays. - res[bindingName] = make([]ObjectAndFilterResult, 0) + res[bindingName] = make([]kemtypes.ObjectAndFilterResult, 0) snapshot := c.SnapshotsFor(bindingName) if snapshot != nil { @@ -250,7 +250,7 @@ func (c *kubernetesBindingsController) SnapshotsFrom(bindingNames ...string) map return res } -func (c *kubernetesBindingsController) Snapshots() map[string][]ObjectAndFilterResult { +func (c *kubernetesBindingsController) Snapshots() map[string][]kemtypes.ObjectAndFilterResult { return c.SnapshotsFrom(c.BindingNames()...) 
} @@ -298,33 +298,33 @@ func (c *kubernetesBindingsController) SnapshotsDump() map[string]interface{} { return dumps } -func ConvertKubeEventToBindingContext(kubeEvent KubeEvent, link *KubernetesBindingToMonitorLink) []BindingContext { - bindingContexts := make([]BindingContext, 0) +func ConvertKubeEventToBindingContext(kubeEvent kemtypes.KubeEvent, link *KubernetesBindingToMonitorLink) []bctx.BindingContext { + bindingContexts := make([]bctx.BindingContext, 0) switch kubeEvent.Type { - case TypeSynchronization: - bc := BindingContext{ + case kemtypes.TypeSynchronization: + bc := bctx.BindingContext{ Binding: link.BindingConfig.BindingName, Type: kubeEvent.Type, Objects: kubeEvent.Objects, } bc.Metadata.JqFilter = link.BindingConfig.Monitor.JqFilter - bc.Metadata.BindingType = OnKubernetesEvent + bc.Metadata.BindingType = htypes.OnKubernetesEvent bc.Metadata.IncludeSnapshots = link.BindingConfig.IncludeSnapshotsFrom bc.Metadata.Group = link.BindingConfig.Group bindingContexts = append(bindingContexts, bc) - case TypeEvent: + case kemtypes.TypeEvent: for _, kEvent := range kubeEvent.WatchEvents { - bc := BindingContext{ + bc := bctx.BindingContext{ Binding: link.BindingConfig.BindingName, Type: kubeEvent.Type, WatchEvent: kEvent, Objects: kubeEvent.Objects, } bc.Metadata.JqFilter = link.BindingConfig.Monitor.JqFilter - bc.Metadata.BindingType = OnKubernetesEvent + bc.Metadata.BindingType = htypes.OnKubernetesEvent bc.Metadata.IncludeSnapshots = link.BindingConfig.IncludeSnapshotsFrom bc.Metadata.Group = link.BindingConfig.Group diff --git a/pkg/hook/controller/schedule_bindings_controller.go b/pkg/hook/controller/schedule_bindings_controller.go index 3f83438a..57b80aec 100644 --- a/pkg/hook/controller/schedule_bindings_controller.go +++ b/pkg/hook/controller/schedule_bindings_controller.go @@ -1,9 +1,9 @@ package controller import ( - . "github.com/flant/shell-operator/pkg/hook/binding_context" - . "github.com/flant/shell-operator/pkg/hook/types" - "github.com/flant/shell-operator/pkg/schedule_manager" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" + htypes "github.com/flant/shell-operator/pkg/hook/types" + schedulemanager "github.com/flant/shell-operator/pkg/schedule_manager" ) // A link between a hook and a kube monitor @@ -19,8 +19,8 @@ type ScheduleBindingToCrontabLink struct { // ScheduleBindingsController handles schedule bindings for one hook. 
type ScheduleBindingsController interface { - WithScheduleBindings([]ScheduleConfig) - WithScheduleManager(schedule_manager.ScheduleManager) + WithScheduleBindings([]htypes.ScheduleConfig) + WithScheduleManager(schedulemanager.ScheduleManager) EnableScheduleBindings() DisableScheduleBindings() CanHandleEvent(crontab string) bool @@ -33,10 +33,10 @@ type scheduleBindingsController struct { ScheduleLinks map[string]*ScheduleBindingToCrontabLink // bindings configurations - ScheduleBindings []ScheduleConfig + ScheduleBindings []htypes.ScheduleConfig // dependencies - scheduleManager schedule_manager.ScheduleManager + scheduleManager schedulemanager.ScheduleManager } // kubernetesHooksController should implement the KubernetesHooksController @@ -49,11 +49,11 @@ var NewScheduleBindingsController = func() *scheduleBindingsController { } } -func (c *scheduleBindingsController) WithScheduleBindings(bindings []ScheduleConfig) { +func (c *scheduleBindingsController) WithScheduleBindings(bindings []htypes.ScheduleConfig) { c.ScheduleBindings = bindings } -func (c *scheduleBindingsController) WithScheduleManager(scheduleManager schedule_manager.ScheduleManager) { +func (c *scheduleBindingsController) WithScheduleManager(scheduleManager schedulemanager.ScheduleManager) { c.scheduleManager = scheduleManager } @@ -71,15 +71,15 @@ func (c *scheduleBindingsController) HandleEvent(crontab string) []BindingExecut for _, link := range c.ScheduleLinks { if link.Crontab == crontab { - bc := BindingContext{ + bc := bctx.BindingContext{ Binding: link.BindingName, } - bc.Metadata.BindingType = Schedule + bc.Metadata.BindingType = htypes.Schedule bc.Metadata.IncludeSnapshots = link.IncludeSnapshots bc.Metadata.Group = link.Group info := BindingExecutionInfo{ - BindingContext: []BindingContext{bc}, + BindingContext: []bctx.BindingContext{bc}, IncludeSnapshots: link.IncludeSnapshots, AllowFailure: link.AllowFailure, QueueName: link.QueueName, diff --git a/pkg/hook/hook.go b/pkg/hook/hook.go index 803c55b1..af857e60 100644 --- a/pkg/hook/hook.go +++ b/pkg/hook/hook.go @@ -13,12 +13,11 @@ import ( "github.com/kennygrant/sanitize" "golang.org/x/time/rate" - "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/executor" - . "github.com/flant/shell-operator/pkg/hook/binding_context" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" "github.com/flant/shell-operator/pkg/hook/config" "github.com/flant/shell-operator/pkg/hook/controller" - . 
"github.com/flant/shell-operator/pkg/hook/types" + htypes "github.com/flant/shell-operator/pkg/hook/types" "github.com/flant/shell-operator/pkg/metric_storage/operation" "github.com/flant/shell-operator/pkg/webhook/admission" "github.com/flant/shell-operator/pkg/webhook/conversion" @@ -44,17 +43,24 @@ type Hook struct { HookController *controller.HookController RateLimiter *rate.Limiter - TmpDir string + TmpDir string + KeepTemporaryHookFiles bool + + LogProxyHookJSON bool + LogProxyHookJSONKey string Logger *log.Logger } -func NewHook(name, path string, logger *log.Logger) *Hook { +func NewHook(name, path string, keepTemporaryHookFiles bool, logProxyHookJSON bool, logProxyHookJSONKey string, logger *log.Logger) *Hook { return &Hook{ - Name: name, - Path: path, - Config: &config.HookConfig{}, - Logger: logger, + Name: name, + Path: path, + Config: &config.HookConfig{}, + KeepTemporaryHookFiles: keepTemporaryHookFiles, + LogProxyHookJSON: logProxyHookJSON, + LogProxyHookJSONKey: logProxyHookJSONKey, + Logger: logger, } } @@ -85,11 +91,11 @@ func (h *Hook) WithHookController(hookController *controller.HookController) { h.HookController = hookController } -func (h *Hook) Run(_ BindingType, context []BindingContext, logLabels map[string]string) (*Result, error) { +func (h *Hook) Run(_ htypes.BindingType, context []bctx.BindingContext, logLabels map[string]string) (*Result, error) { // Refresh snapshots freshBindingContext := h.HookController.UpdateSnapshots(context) - versionedContextList := ConvertBindingContextList(h.Config.Version, freshBindingContext) + versionedContextList := bctx.ConvertBindingContextList(h.Config.Version, freshBindingContext) contextPath, err := h.prepareBindingContextJsonFile(versionedContextList) if err != nil { @@ -118,7 +124,7 @@ func (h *Hook) Run(_ BindingType, context []BindingContext, logLabels map[string // remove tmp file on hook exit defer func() { - if app.DebugKeepTmpFiles != "yes" { + if h.KeepTemporaryHookFiles { _ = os.Remove(contextPath) _ = os.Remove(metricsPath) _ = os.Remove(conversionPath) @@ -138,11 +144,18 @@ func (h *Hook) Run(_ BindingType, context []BindingContext, logLabels map[string envs = append(envs, fmt.Sprintf("KUBERNETES_PATCH_PATH=%s", kubernetesPatchPath)) } - hookCmd := executor.MakeCommand(path.Dir(h.Path), h.Path, []string{}, envs) + hookCmd := executor.NewExecutor( + path.Dir(h.Path), + h.Path, + []string{}, + envs). + WithLogProxyHookJSON(h.LogProxyHookJSON). + WithLogProxyHookJSONKey(h.LogProxyHookJSONKey). 
+ WithLogger(h.Logger.Named("executor")) result := &Result{} - result.Usage, err = executor.RunAndLogLines(hookCmd, logLabels, h.Logger) + result.Usage, err = hookCmd.RunAndLogLines(logLabels) if err != nil { return result, fmt.Errorf("%s FAILED: %s", h.Name, err) } @@ -258,7 +271,7 @@ func (h *Hook) GetConfigDescription() string { return strings.Join(msgs, ", ") } -func (h *Hook) prepareBindingContextJsonFile(context BindingContextList) (string, error) { +func (h *Hook) prepareBindingContextJsonFile(context bctx.BindingContextList) (string, error) { var err error data, err := context.Json() if err != nil { diff --git a/pkg/hook/hook_manager.go b/pkg/hook/hook_manager.go index 5d31c725..db56b752 100644 --- a/pkg/hook/hook_manager.go +++ b/pkg/hook/hook_manager.go @@ -11,12 +11,13 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/executor" "github.com/flant/shell-operator/pkg/hook/controller" - . "github.com/flant/shell-operator/pkg/hook/types" - "github.com/flant/shell-operator/pkg/kube_events_manager" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" - "github.com/flant/shell-operator/pkg/schedule_manager" + htypes "github.com/flant/shell-operator/pkg/hook/types" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" + schedulemanager "github.com/flant/shell-operator/pkg/schedule_manager" utils_file "github.com/flant/shell-operator/pkg/utils/file" "github.com/flant/shell-operator/pkg/webhook/admission" "github.com/flant/shell-operator/pkg/webhook/conversion" @@ -26,8 +27,8 @@ type Manager struct { // dependencies workingDir string tempDir string - kubeEventsManager kube_events_manager.KubeEventsManager - scheduleManager schedule_manager.ScheduleManager + kubeEventsManager kubeeventsmanager.KubeEventsManager + scheduleManager schedulemanager.ScheduleManager conversionWebhookManager *conversion.WebhookManager admissionWebhookManager *admission.WebhookManager @@ -37,7 +38,7 @@ type Manager struct { // index by name hooksByName map[string]*Hook // index to search hooks by binding type - hooksInOrder map[BindingType][]*Hook + hooksInOrder map[htypes.BindingType][]*Hook // Index crdName -> fromVersion -> conversionLink conversionChains *conversion.ChainStorage @@ -49,8 +50,8 @@ type Manager struct { type ManagerConfig struct { WorkingDir string TempDir string - Kmgr kube_events_manager.KubeEventsManager - Smgr schedule_manager.ScheduleManager + Kmgr kubeeventsmanager.KubeEventsManager + Smgr schedulemanager.ScheduleManager Wmgr *admission.WebhookManager Cmgr *conversion.WebhookManager @@ -61,7 +62,7 @@ func NewHookManager(config *ManagerConfig) *Manager { return &Manager{ hooksByName: make(map[string]*Hook), hookNamesInOrder: make([]string, 0), - hooksInOrder: make(map[BindingType][]*Hook), + hooksInOrder: make(map[htypes.BindingType][]*Hook), conversionChains: conversion.NewChainStorage(), workingDir: config.WorkingDir, @@ -87,7 +88,7 @@ func (hm *Manager) TempDir() string { func (hm *Manager) Init() error { log.Info("Initialize hooks manager. 
Search for and load all hooks.") - hm.hooksInOrder = make(map[BindingType][]*Hook) + hm.hooksInOrder = make(map[htypes.BindingType][]*Hook) hm.hooksByName = make(map[string]*Hook) if err := utils_file.RecursiveCheckLibDirectory(hm.workingDir); err != nil { @@ -132,7 +133,7 @@ func (hm *Manager) loadHook(hookPath string) (hook *Hook, err error) { if err != nil { return nil, err } - hook = NewHook(hookName, hookPath, hm.logger.Named("hook")) + hook = NewHook(hookName, hookPath, app.DebugKeepTmpFiles, app.LogProxyHookJSON, app.ProxyJsonLogKey, hm.logger.Named("hook")) hookEntry := hm.logger.With("hook", hook.Name). With("phase", "config") @@ -210,16 +211,23 @@ func (hm *Manager) loadHook(hookPath string) (hook *Hook, err error) { func (hm *Manager) execCommandOutput(hookName string, dir string, entrypoint string, envs []string, args []string) ([]byte, error) { envs = append(os.Environ(), envs...) - cmd := executor.MakeCommand(dir, entrypoint, args, envs) - cmd.Stdout = nil - cmd.Stderr = nil + hookCmd := executor.NewExecutor( + dir, + entrypoint, + args, + envs). + WithLogProxyHookJSON(app.LogProxyHookJSON). + WithLogProxyHookJSONKey(app.ProxyJsonLogKey). + WithCMDStdout(nil). + WithCMDStderr(nil). + WithLogger(hm.logger.Named("executor")) debugEntry := hm.logger.With("hook", hookName). - With("cmd", strings.Join(cmd.Args, " ")) + With("cmd", strings.Join(args, " ")) - debugEntry.Debugf("Executing hook in %s", cmd.Dir) + debugEntry.Debugf("Executing hook in %s", dir) - output, err := executor.Output(cmd) + output, err := hookCmd.Output() if err != nil { return output, err } @@ -242,7 +250,7 @@ func (hm *Manager) GetHookNames() []string { return hm.hookNamesInOrder } -func (hm *Manager) GetHooksInOrder(bindingType BindingType) ([]string, error) { +func (hm *Manager) GetHooksInOrder(bindingType htypes.BindingType) ([]string, error) { hooks, ok := hm.hooksInOrder[bindingType] if !ok { return []string{}, nil @@ -250,9 +258,9 @@ func (hm *Manager) GetHooksInOrder(bindingType BindingType) ([]string, error) { // OnStartup hooks are sorted by onStartup config value // FIXME: onStartup value is now a config validating error, no need to check it here again. 
- if bindingType == OnStartup { + if bindingType == htypes.OnStartup { for _, hook := range hooks { - if !hook.Config.HasBinding(OnStartup) { + if !hook.Config.HasBinding(htypes.OnStartup) { return nil, fmt.Errorf("possible bug: hook '%s' is registered as OnStartup but has no onStartup value", hook.Name) } } @@ -270,8 +278,8 @@ func (hm *Manager) GetHooksInOrder(bindingType BindingType) ([]string, error) { return hooksNames, nil } -func (hm *Manager) HandleKubeEvent(kubeEvent KubeEvent, createTaskFn func(*Hook, controller.BindingExecutionInfo)) { - kubeHooks, _ := hm.GetHooksInOrder(OnKubernetesEvent) +func (hm *Manager) HandleKubeEvent(kubeEvent kemtypes.KubeEvent, createTaskFn func(*Hook, controller.BindingExecutionInfo)) { + kubeHooks, _ := hm.GetHooksInOrder(htypes.OnKubernetesEvent) for _, hookName := range kubeHooks { h := hm.GetHook(hookName) @@ -287,7 +295,7 @@ func (hm *Manager) HandleKubeEvent(kubeEvent KubeEvent, createTaskFn func(*Hook, } func (hm *Manager) HandleScheduleEvent(crontab string, createTaskFn func(*Hook, controller.BindingExecutionInfo)) { - schHooks, _ := hm.GetHooksInOrder(Schedule) + schHooks, _ := hm.GetHooksInOrder(htypes.Schedule) for _, hookName := range schHooks { h := hm.GetHook(hookName) if h.HookController.CanHandleScheduleEvent(crontab) { @@ -301,7 +309,7 @@ func (hm *Manager) HandleScheduleEvent(crontab string, createTaskFn func(*Hook, } func (hm *Manager) HandleAdmissionEvent(event admission.Event, createTaskFn func(*Hook, controller.BindingExecutionInfo)) { - vHooks, _ := hm.GetHooksInOrder(KubernetesValidating) + vHooks, _ := hm.GetHooksInOrder(htypes.KubernetesValidating) for _, hookName := range vHooks { h := hm.GetHook(hookName) if h.HookController.CanHandleAdmissionEvent(event) { @@ -313,7 +321,7 @@ func (hm *Manager) HandleAdmissionEvent(event admission.Event, createTaskFn func } } - mHooks, _ := hm.GetHooksInOrder(KubernetesMutating) + mHooks, _ := hm.GetHooksInOrder(htypes.KubernetesMutating) for _, hookName := range mHooks { h := hm.GetHook(hookName) if h.HookController.CanHandleAdmissionEvent(event) { @@ -326,20 +334,20 @@ func (hm *Manager) HandleAdmissionEvent(event admission.Event, createTaskFn func } } -func (hm *Manager) DetectAdmissionEventType(event admission.Event) BindingType { - vHooks, _ := hm.GetHooksInOrder(KubernetesValidating) +func (hm *Manager) DetectAdmissionEventType(event admission.Event) htypes.BindingType { + vHooks, _ := hm.GetHooksInOrder(htypes.KubernetesValidating) for _, hookName := range vHooks { h := hm.GetHook(hookName) if h.HookController.CanHandleAdmissionEvent(event) { - return KubernetesValidating + return htypes.KubernetesValidating } } - mHooks, _ := hm.GetHooksInOrder(KubernetesMutating) + mHooks, _ := hm.GetHooksInOrder(htypes.KubernetesMutating) for _, hookName := range mHooks { h := hm.GetHook(hookName) if h.HookController.CanHandleAdmissionEvent(event) { - return KubernetesMutating + return htypes.KubernetesMutating } } @@ -349,7 +357,7 @@ func (hm *Manager) DetectAdmissionEventType(event admission.Event) BindingType { // HandleConversionEvent receives a crdName and calculates a sequence of hooks to run. 
func (hm *Manager) HandleConversionEvent(crdName string, request *v1.ConversionRequest, rule conversion.Rule, createTaskFn func(*Hook, controller.BindingExecutionInfo)) { - vHooks, _ := hm.GetHooksInOrder(KubernetesConversion) + vHooks, _ := hm.GetHooksInOrder(htypes.KubernetesConversion) for _, hookName := range vHooks { h := hm.GetHook(hookName) @@ -364,7 +372,7 @@ func (hm *Manager) HandleConversionEvent(crdName string, request *v1.ConversionR } func (hm *Manager) UpdateConversionChains() error { - vHooks, _ := hm.GetHooksInOrder(KubernetesConversion) + vHooks, _ := hm.GetHooksInOrder(htypes.KubernetesConversion) // Update conversionChains. for _, hookName := range vHooks { diff --git a/pkg/hook/hook_test.go b/pkg/hook/hook_test.go index b7c28cf8..b2039ca5 100644 --- a/pkg/hook/hook_test.go +++ b/pkg/hook/hook_test.go @@ -10,7 +10,7 @@ import ( "golang.org/x/time/rate" "github.com/flant/shell-operator/pkg/hook/config" - . "github.com/flant/shell-operator/pkg/hook/types" + htypes "github.com/flant/shell-operator/pkg/hook/types" ) func Test_Hook_SafeName(t *testing.T) { @@ -24,7 +24,7 @@ func Test_Hook_SafeName(t *testing.T) { t.Error(err) } - h := NewHook(hookName, hookPath, log.NewNop()) + h := NewHook(hookName, hookPath, false, false, "", log.NewNop()) g.Expect(h.SafeName()).To(Equal("002-cool-hooks-monitor-namespaces-py")) } @@ -41,7 +41,7 @@ func Test_CreateLimiter(t *testing.T) { burst int limit rate.Limit title string - settings *Settings + settings *htypes.Settings }{ { title: "Nil run settings: should return limiter with defaults", @@ -54,14 +54,14 @@ func Test_CreateLimiter(t *testing.T) { title: "Empty settings: should return limiter with defaults", limit: defaultLimit, burst: defaultBurst, - settings: &Settings{}, + settings: &htypes.Settings{}, }, { title: "Burst is zero, limit is non-zero: should return limiter with zero burst and converted interval", limit: rate.Limit(1 / 20.0), burst: defaultBurst, - settings: &Settings{ + settings: &htypes.Settings{ ExecutionMinInterval: 20 * time.Second, }, }, @@ -70,7 +70,7 @@ func Test_CreateLimiter(t *testing.T) { title: "Burst is non-zero, limit is zero: should return limiter with default limiter and passed burst", limit: defaultLimit, burst: 3, - settings: &Settings{ + settings: &htypes.Settings{ ExecutionBurst: 3, }, }, @@ -79,7 +79,7 @@ func Test_CreateLimiter(t *testing.T) { title: "Burst and limit are passed: should run limiter with passed burst and converted interval", limit: rate.Limit(1.0 / 30), burst: 3, - settings: &Settings{ + settings: &htypes.Settings{ ExecutionBurst: 3, ExecutionMinInterval: 30 * time.Second, }, @@ -117,7 +117,7 @@ func Test_Hook_WithConfig(t *testing.T) { func() { g.Expect(err).ToNot(HaveOccurred()) g.Expect(hook.Config).ToNot(BeNil()) - g.Expect(hook.Config.Bindings()).To(Equal([]BindingType{OnStartup})) + g.Expect(hook.Config.Bindings()).To(Equal([]htypes.BindingType{htypes.OnStartup})) g.Expect(hook.Config.OnStartup).ToNot(BeNil()) g.Expect(hook.Config.OnStartup.Order).To(Equal(10.0)) }, @@ -136,7 +136,7 @@ func Test_Hook_WithConfig(t *testing.T) { for _, test := range tests { t.Run(test.name, func(_ *testing.T) { - hook = NewHook("hook-sh", "/hooks/hook.sh", log.NewNop()) + hook = NewHook("hook-sh", "/hooks/hook.sh", false, false, "", log.NewNop()) _, err = hook.LoadConfig([]byte(test.jsonData)) test.fn() }) diff --git a/pkg/hook/task_metadata/task_metadata.go b/pkg/hook/task_metadata/task_metadata.go index 0b0a3b38..5738ac0a 100644 --- a/pkg/hook/task_metadata/task_metadata.go +++ 
b/pkg/hook/task_metadata/task_metadata.go @@ -5,7 +5,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" - "github.com/flant/shell-operator/pkg/hook/binding_context" + bindingcontext "github.com/flant/shell-operator/pkg/hook/binding_context" "github.com/flant/shell-operator/pkg/hook/types" "github.com/flant/shell-operator/pkg/task" ) @@ -22,7 +22,7 @@ type HookNameAccessor interface { } type BindingContextAccessor interface { - GetBindingContext() []binding_context.BindingContext + GetBindingContext() []bindingcontext.BindingContext } type MonitorIDAccessor interface { @@ -34,7 +34,7 @@ type HookMetadata struct { Binding string // binding name Group string BindingType types.BindingType - BindingContext []binding_context.BindingContext + BindingContext []bindingcontext.BindingContext AllowFailure bool // Task considered as 'ok' if hook failed. False by default. Can be true for some schedule hooks. MonitorIDs []string // monitor ids for Synchronization tasks @@ -66,7 +66,7 @@ func (m HookMetadata) GetHookName() string { return m.HookName } -func (m HookMetadata) GetBindingContext() []binding_context.BindingContext { +func (m HookMetadata) GetBindingContext() []bindingcontext.BindingContext { return m.BindingContext } @@ -88,12 +88,12 @@ func (m *HookMetadata) WithBinding(binding types.BindingType) *HookMetadata { return m } -func (m *HookMetadata) WithBindingContext(context []binding_context.BindingContext) *HookMetadata { +func (m *HookMetadata) WithBindingContext(context []bindingcontext.BindingContext) *HookMetadata { m.BindingContext = context return m } -func (m *HookMetadata) AppendBindingContext(context binding_context.BindingContext) *HookMetadata { +func (m *HookMetadata) AppendBindingContext(context bindingcontext.BindingContext) *HookMetadata { m.BindingContext = append(m.BindingContext, context) return m } diff --git a/pkg/hook/task_metadata/task_metadata_test.go b/pkg/hook/task_metadata/task_metadata_test.go index ce3dd0e2..e5ea5aa6 100644 --- a/pkg/hook/task_metadata/task_metadata_test.go +++ b/pkg/hook/task_metadata/task_metadata_test.go @@ -7,8 +7,8 @@ import ( . "github.com/onsi/gomega" - . "github.com/flant/shell-operator/pkg/hook/binding_context" - . "github.com/flant/shell-operator/pkg/hook/types" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" + htypes "github.com/flant/shell-operator/pkg/hook/types" "github.com/flant/shell-operator/pkg/task" "github.com/flant/shell-operator/pkg/task/queue" ) @@ -19,8 +19,8 @@ func Test_HookMetadata_Access(t *testing.T) { Task := task.NewTask(HookRun). WithMetadata(HookMetadata{ HookName: "test-hook", - BindingType: Schedule, - BindingContext: []BindingContext{ + BindingType: htypes.Schedule, + BindingContext: []bctx.BindingContext{ {Binding: "each_1_min"}, {Binding: "each_5_min"}, }, @@ -30,7 +30,7 @@ func Test_HookMetadata_Access(t *testing.T) { hm := HookMetadataAccessor(Task) g.Expect(hm.HookName).Should(Equal("test-hook")) - g.Expect(hm.BindingType).Should(Equal(Schedule)) + g.Expect(hm.BindingType).Should(Equal(htypes.Schedule)) g.Expect(hm.AllowFailure).Should(BeTrue()) g.Expect(hm.BindingContext).Should(HaveLen(2)) g.Expect(hm.BindingContext[0].Binding).Should(Equal("each_1_min")) @@ -54,7 +54,7 @@ func Test_HookMetadata_QueueDump_Task_Description(t *testing.T) { q.AddLast(task.NewTask(HookRun). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingType: OnKubernetesEvent, + BindingType: htypes.OnKubernetesEvent, Binding: "monitor_pods", }). WithLogLabels(logLabels). 
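The accessor interfaces above (HookNameAccessor, BindingContextAccessor, MonitorIDAccessor) exist so that queue consumers can read hook metadata without depending on the concrete HookMetadata struct. A minimal sketch of that pattern, not part of the patch, assuming it sits next to the accessors in the task_metadata package and that task.Task exposes its metadata via GetMetadata():

// readHookTaskInfo inspects a task's metadata through the accessor interfaces
// instead of asserting the concrete HookMetadata type directly.
func readHookTaskInfo(t task.Task) (string, int) {
    meta := t.GetMetadata()

    hookName := ""
    if acc, ok := meta.(HookNameAccessor); ok {
        hookName = acc.GetHookName()
    }

    contexts := 0
    if acc, ok := meta.(BindingContextAccessor); ok {
        contexts = len(acc.GetBindingContext())
    }

    return hookName, contexts
}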
@@ -63,7 +63,7 @@ func Test_HookMetadata_QueueDump_Task_Description(t *testing.T) { q.AddLast(task.NewTask(HookRun). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingType: Schedule, + BindingType: htypes.Schedule, AllowFailure: true, Binding: "every 1 sec", Group: "monitor_pods", diff --git a/pkg/hook/types/bindings.go b/pkg/hook/types/bindings.go index c32b43ab..17bfe148 100644 --- a/pkg/hook/types/bindings.go +++ b/pkg/hook/types/bindings.go @@ -3,8 +3,8 @@ package types import ( "time" - "github.com/flant/shell-operator/pkg/kube_events_manager" - . "github.com/flant/shell-operator/pkg/schedule_manager/types" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + smtypes "github.com/flant/shell-operator/pkg/schedule_manager/types" "github.com/flant/shell-operator/pkg/webhook/admission" "github.com/flant/shell-operator/pkg/webhook/conversion" ) @@ -33,7 +33,7 @@ type OnStartupConfig struct { type ScheduleConfig struct { CommonBindingConfig - ScheduleEntry ScheduleEntry + ScheduleEntry smtypes.ScheduleEntry IncludeSnapshotsFrom []string Queue string Group string @@ -41,7 +41,7 @@ type ScheduleConfig struct { type OnKubernetesEventConfig struct { CommonBindingConfig - Monitor *kube_events_manager.MonitorConfig + Monitor *kubeeventsmanager.MonitorConfig IncludeSnapshotsFrom []string Queue string Group string diff --git a/pkg/jq/apply_jq_exec.go b/pkg/jq/apply_jq_exec.go deleted file mode 100644 index afeb0b2a..00000000 --- a/pkg/jq/apply_jq_exec.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !cgo || (cgo && !use_libjq) -// +build !cgo cgo,!use_libjq - -package jq - -// Note: this implementation is enabled by default. - -// ApplyJqFilter runs jq expression provided in jqFilter with jsonData as input. -// -// It uses jq as a subprocess. 
-func ApplyJqFilter(jqFilter string, jsonData []byte, libPath string) (string, error) { - return jqExec(jqFilter, jsonData, libPath) -} - -func FilterInfo() string { - return "jqFilter implementation: use jq binary from $PATH" -} diff --git a/pkg/kube/object_patch/helpers.go b/pkg/kube/object_patch/helpers.go index 507c8374..a17c5288 100644 --- a/pkg/kube/object_patch/helpers.go +++ b/pkg/kube/object_patch/helpers.go @@ -12,8 +12,7 @@ import ( k8yaml "sigs.k8s.io/yaml" "github.com/flant/kube-client/manifest" - "github.com/flant/shell-operator/pkg/app" - "github.com/flant/shell-operator/pkg/jq" + "github.com/flant/shell-operator/pkg/filter" ) func unmarshalFromJSONOrYAML(specs []byte) ([]OperationSpec, error) { @@ -65,13 +64,13 @@ func unmarshalFromYaml(yamlSpecs []byte) ([]OperationSpec, error) { return specSlice, nil } -func applyJQPatch(jqFilter string, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { +func applyJQPatch(jqFilter string, fl filter.Filter, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { objBytes, err := obj.MarshalJSON() if err != nil { return nil, err } - filterResult, err := jq.ApplyJqFilter(jqFilter, objBytes, app.JqLibraryPath) + filterResult, err := fl.ApplyFilter(jqFilter, objBytes) if err != nil { return nil, fmt.Errorf("failed to apply jqFilter:\n%sto Object:\n%s\n"+ "error: %s", jqFilter, obj, err) diff --git a/pkg/kube/object_patch/operation.go b/pkg/kube/object_patch/operation.go index e58ea968..8128f139 100644 --- a/pkg/kube/object_patch/operation.go +++ b/pkg/kube/object_patch/operation.go @@ -8,6 +8,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" + + "github.com/flant/shell-operator/pkg/app" + "github.com/flant/shell-operator/pkg/filter/jq" ) // A JSON and YAML representation of the operation for shell hooks @@ -192,7 +195,8 @@ func NewFromOperationSpec(spec OperationSpec) Operation { case JQPatch: return NewFilterPatchOperation( func(u *unstructured.Unstructured) (*unstructured.Unstructured, error) { - return applyJQPatch(spec.JQFilter, u) + filter := jq.NewFilter(app.JqLibraryPath) + return applyJQPatch(spec.JQFilter, filter, u) }, spec.ApiVersion, spec.Kind, spec.Namespace, spec.Name, WithSubresource(spec.Subresource), diff --git a/pkg/kube/object_patch/patch_collector.go b/pkg/kube/object_patch/patch_collector.go index 0d2c365e..229f9b56 100644 --- a/pkg/kube/object_patch/patch_collector.go +++ b/pkg/kube/object_patch/patch_collector.go @@ -4,6 +4,15 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) +type IPatchCollector interface { + Create(object interface{}, options ...CreateOption) + Delete(apiVersion string, kind string, namespace string, name string, options ...DeleteOption) + Filter(filterFunc func(*unstructured.Unstructured) (*unstructured.Unstructured, error), apiVersion string, kind string, namespace string, name string, options ...FilterOption) + JSONPatch(jsonPatch interface{}, apiVersion string, kind string, namespace string, name string, options ...PatchOption) + MergePatch(mergePatch interface{}, apiVersion string, kind string, namespace string, name string, options ...PatchOption) + Operations() []Operation +} + type PatchCollector struct { patchOperations []Operation } diff --git a/pkg/kube_events_manager/error_handler.go b/pkg/kube_events_manager/error_handler.go index 852b789e..c222b993 100644 --- a/pkg/kube_events_manager/error_handler.go +++ 
b/pkg/kube_events_manager/error_handler.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "io" @@ -7,19 +7,19 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/cache" - "github.com/flant/shell-operator/pkg/metric_storage" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" utils "github.com/flant/shell-operator/pkg/utils/labels" ) type WatchErrorHandler struct { description string kind string - metricStorage *metric_storage.MetricStorage + metricStorage *metricstorage.MetricStorage logger *log.Logger } -func newWatchErrorHandler(description string, kind string, logLabels map[string]string, metricStorage *metric_storage.MetricStorage, logger *log.Logger) *WatchErrorHandler { +func newWatchErrorHandler(description string, kind string, logLabels map[string]string, metricStorage *metricstorage.MetricStorage, logger *log.Logger) *WatchErrorHandler { return &WatchErrorHandler{ description: description, kind: kind, diff --git a/pkg/kube_events_manager/factory.go b/pkg/kube_events_manager/factory.go index fc439025..7b35f9da 100644 --- a/pkg/kube_events_manager/factory.go +++ b/pkg/kube_events_manager/factory.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "context" diff --git a/pkg/kube_events_manager/filter.go b/pkg/kube_events_manager/filter.go index 2165f2c9..b740bfc2 100644 --- a/pkg/kube_events_manager/filter.go +++ b/pkg/kube_events_manager/filter.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "context" @@ -10,19 +10,18 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/flant/shell-operator/pkg/app" - "github.com/flant/shell-operator/pkg/jq" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" + "github.com/flant/shell-operator/pkg/filter" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" utils_checksum "github.com/flant/shell-operator/pkg/utils/checksum" ) // applyFilter filters object json representation with jq expression, calculate checksum // over result and return ObjectAndFilterResult. If jqFilter is empty, no filter // is required and checksum is calculated over full json representation of the object. 
-func applyFilter(jqFilter string, filterFn func(obj *unstructured.Unstructured) (result interface{}, err error), obj *unstructured.Unstructured) (*ObjectAndFilterResult, error) { +func applyFilter(jqFilter string, fl filter.Filter, filterFn func(obj *unstructured.Unstructured) (result interface{}, err error), obj *unstructured.Unstructured) (*kemtypes.ObjectAndFilterResult, error) { defer trace.StartRegion(context.Background(), "ApplyJqFilter").End() - res := &ObjectAndFilterResult{ + res := &kemtypes.ObjectAndFilterResult{ Object: obj, } res.Metadata.JqFilter = jqFilter @@ -57,7 +56,7 @@ func applyFilter(jqFilter string, filterFn func(obj *unstructured.Unstructured) } else { var err error var filtered string - filtered, err = jq.ApplyJqFilter(jqFilter, data, app.JqLibraryPath) + filtered, err = fl.ApplyFilter(jqFilter, data) if err != nil { return nil, fmt.Errorf("jqFilter: %v", err) } diff --git a/pkg/kube_events_manager/filter_test.go b/pkg/kube_events_manager/filter_test.go index a641e4fe..392793fe 100644 --- a/pkg/kube_events_manager/filter_test.go +++ b/pkg/kube_events_manager/filter_test.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "encoding/json" @@ -6,12 +6,15 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/flant/shell-operator/pkg/filter/jq" ) func TestApplyFilter(t *testing.T) { t.Run("filter func with error", func(t *testing.T) { uns := &unstructured.Unstructured{Object: map[string]interface{}{"foo": "bar"}} - _, err := applyFilter("", filterFuncWithError, uns) + filter := jq.NewFilter("") + _, err := applyFilter("", filter, filterFuncWithError, uns) assert.EqualError(t, err, "filterFn (github.com/flant/shell-operator/pkg/kube_events_manager.filterFuncWithError) contains an error: invalid character 'a' looking for beginning of value") }) } diff --git a/pkg/kube_events_manager/kube_events_manager.go b/pkg/kube_events_manager/kube_events_manager.go index cc8b583c..2ed40404 100644 --- a/pkg/kube_events_manager/kube_events_manager.go +++ b/pkg/kube_events_manager/kube_events_manager.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "context" @@ -8,32 +8,32 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" klient "github.com/flant/kube-client/client" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" - "github.com/flant/shell-operator/pkg/metric_storage" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" ) type KubeEventsManager interface { - WithMetricStorage(mstor *metric_storage.MetricStorage) + WithMetricStorage(mstor *metricstorage.MetricStorage) AddMonitor(monitorConfig *MonitorConfig) error HasMonitor(monitorID string) bool GetMonitor(monitorID string) Monitor StartMonitor(monitorID string) StopMonitor(monitorID string) error - Ch() chan KubeEvent + Ch() chan kemtypes.KubeEvent PauseHandleEvents() } // kubeEventsManager is a main implementation of KubeEventsManager. 
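For orientation, the renamed manager is consumed the same way as before: events arrive on the channel returned by Ch() and are distinguished by the kemtypes constants. A minimal consumption sketch, not part of the patch, assuming a configured manager inside the kubeeventsmanager package with error handling elided:

// drainKubeEvents reads events emitted by the manager and branches on the
// event type, mirroring how monitors report Synchronization and Event phases.
func drainKubeEvents(mgr KubeEventsManager) {
    for ev := range mgr.Ch() {
        switch ev.Type {
        case kemtypes.TypeSynchronization:
            // Initial snapshot of all existing objects for the monitor.
            _ = ev.Objects
        case kemtypes.TypeEvent:
            // Incremental watch event (Added/Modified/Deleted).
            _ = ev.WatchEvents
        }
    }
}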
type kubeEventsManager struct { // channel to emit KubeEvent objects - KubeEventCh chan KubeEvent + KubeEventCh chan kemtypes.KubeEvent KubeClient *klient.Client ctx context.Context cancel context.CancelFunc - metricStorage *metric_storage.MetricStorage + metricStorage *metricstorage.MetricStorage m sync.RWMutex Monitors map[string]Monitor @@ -53,13 +53,13 @@ func NewKubeEventsManager(ctx context.Context, client *klient.Client, logger *lo KubeClient: client, m: sync.RWMutex{}, Monitors: make(map[string]Monitor), - KubeEventCh: make(chan KubeEvent, 1), + KubeEventCh: make(chan kemtypes.KubeEvent, 1), logger: logger, } return em } -func (mgr *kubeEventsManager) WithMetricStorage(mstor *metric_storage.MetricStorage) { +func (mgr *kubeEventsManager) WithMetricStorage(mstor *metricstorage.MetricStorage) { mgr.metricStorage = mstor } @@ -73,7 +73,7 @@ func (mgr *kubeEventsManager) AddMonitor(monitorConfig *MonitorConfig) error { mgr.KubeClient, mgr.metricStorage, monitorConfig, - func(ev KubeEvent) { + func(ev kemtypes.KubeEvent) { defer trace.StartRegion(context.Background(), "EmitKubeEvent").End() mgr.KubeEventCh <- ev }, @@ -130,7 +130,7 @@ func (mgr *kubeEventsManager) StopMonitor(monitorID string) error { } // Ch returns a channel to receive KubeEvent objects. -func (mgr *kubeEventsManager) Ch() chan KubeEvent { +func (mgr *kubeEventsManager) Ch() chan kemtypes.KubeEvent { return mgr.KubeEventCh } diff --git a/pkg/kube_events_manager/kube_events_manager_test.go b/pkg/kube_events_manager/kube_events_manager_test.go index ed55b4d5..0d4c92ed 100644 --- a/pkg/kube_events_manager/kube_events_manager_test.go +++ b/pkg/kube_events_manager/kube_events_manager_test.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "context" @@ -15,7 +15,7 @@ import ( fakediscovery "k8s.io/client-go/discovery/fake" klient "github.com/flant/kube-client/client" - . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" ) func Test_MainKubeEventsManager_Run(t *testing.T) { @@ -56,12 +56,12 @@ func Test_MainKubeEventsManager_Run(t *testing.T) { monitor := &MonitorConfig{ ApiVersion: "v1", Kind: "Pod", - NamespaceSelector: &NamespaceSelector{ - NameSelector: &NameSelector{ + NamespaceSelector: &kemtypes.NamespaceSelector{ + NameSelector: &kemtypes.NameSelector{ MatchNames: []string{"default", "prod", "stage"}, }, }, - NameSelector: &NameSelector{ + NameSelector: &kemtypes.NameSelector{ MatchNames: []string{"pod-1", "pod-2", "pod-3", "pod-4"}, }, } @@ -139,15 +139,15 @@ func Test_MainKubeEventsManager_HandleEvents(t *testing.T) { // Init() replacement mgr := NewKubeEventsManager(ctx, kubeClient, log.NewNop()) - mgr.KubeEventCh = make(chan KubeEvent, 10) + mgr.KubeEventCh = make(chan kemtypes.KubeEvent, 10) // monitor with 3 namespaces and 4 object names and all event types monitor := &MonitorConfig{ ApiVersion: "v1", Kind: "Pod", - EventTypes: []WatchEventType{WatchEventAdded, WatchEventModified, WatchEventDeleted}, - NamespaceSelector: &NamespaceSelector{ - NameSelector: &NameSelector{ + EventTypes: []kemtypes.WatchEventType{kemtypes.WatchEventAdded, kemtypes.WatchEventModified, kemtypes.WatchEventDeleted}, + NamespaceSelector: &kemtypes.NamespaceSelector{ + NameSelector: &kemtypes.NameSelector{ MatchNames: []string{"default"}, }, }, @@ -212,7 +212,7 @@ func Test_MainKubeEventsManager_HandleEvents(t *testing.T) { assert.Equal(t, "Event", ev.Type) assert.Equal(t, "MonitorId", ev.MonitorId) - assert.Equal(t, WatchEventAdded, ev.WatchEvents[0]) + assert.Equal(t, kemtypes.WatchEventAdded, ev.WatchEvents[0]) assert.Len(t, ev.Objects, 1) name := ev.Objects[0].Object.GetName() @@ -320,9 +320,9 @@ func Test_FakeClient_CatchUpdates(t *testing.T) { monitor := &MonitorConfig{ ApiVersion: "v1", Kind: "Pod", - EventTypes: []WatchEventType{WatchEventAdded, WatchEventModified, WatchEventDeleted}, - NamespaceSelector: &NamespaceSelector{ - NameSelector: &NameSelector{ + EventTypes: []kemtypes.WatchEventType{kemtypes.WatchEventAdded, kemtypes.WatchEventModified, kemtypes.WatchEventDeleted}, + NamespaceSelector: &kemtypes.NamespaceSelector{ + NameSelector: &kemtypes.NameSelector{ MatchNames: []string{"default"}, }, }, @@ -392,7 +392,7 @@ func Test_FakeClient_CatchUpdates(t *testing.T) { assert.Equal(t, "Event", ev.Type) assert.Equal(t, "MonitorId", ev.MonitorId) - assert.Equal(t, WatchEventAdded, ev.WatchEvents[0]) + assert.Equal(t, kemtypes.WatchEventAdded, ev.WatchEvents[0]) assert.Len(t, ev.Objects, 1) name := ev.Objects[0].Object.GetName() diff --git a/pkg/kube_events_manager/monitor.go b/pkg/kube_events_manager/monitor.go index 1cbc4b65..14c453fd 100644 --- a/pkg/kube_events_manager/monitor.go +++ b/pkg/kube_events_manager/monitor.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "context" @@ -8,8 +8,8 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" klient "github.com/flant/kube-client/client" - . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" - "github.com/flant/shell-operator/pkg/metric_storage" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" utils "github.com/flant/shell-operator/pkg/utils/labels" ) @@ -18,7 +18,7 @@ type Monitor interface { Start(context.Context) Stop() PauseHandleEvents() - Snapshot() []ObjectAndFilterResult + Snapshot() []kemtypes.ObjectAndFilterResult EnableKubeEventCb() GetConfig() *MonitorConfig SnapshotOperations() (total *CachedObjectsInfo, last *CachedObjectsInfo) @@ -36,7 +36,7 @@ type monitor struct { // map of dynamically starting informers VaryingInformers map[string][]*resourceInformer - eventCb func(KubeEvent) + eventCb func(kemtypes.KubeEvent) eventsEnabled bool // Index of namespaces statically defined in monitor configuration staticNamespaces map[string]bool @@ -45,12 +45,12 @@ type monitor struct { ctx context.Context cancel context.CancelFunc - metricStorage *metric_storage.MetricStorage + metricStorage *metricstorage.MetricStorage logger *log.Logger } -func NewMonitor(ctx context.Context, client *klient.Client, mstor *metric_storage.MetricStorage, config *MonitorConfig, eventCb func(KubeEvent), logger *log.Logger) *monitor { +func NewMonitor(ctx context.Context, client *klient.Client, mstor *metricstorage.MetricStorage, config *MonitorConfig, eventCb func(kemtypes.KubeEvent), logger *log.Logger) *monitor { cctx, cancel := context.WithCancel(ctx) return &monitor{ @@ -186,8 +186,8 @@ func (m *monitor) CreateInformers() error { } // Snapshot returns all existed objects from all created informers -func (m *monitor) Snapshot() []ObjectAndFilterResult { - objects := make([]ObjectAndFilterResult, 0) +func (m *monitor) Snapshot() []kemtypes.ObjectAndFilterResult { + objects := make([]kemtypes.ObjectAndFilterResult, 0) for _, informer := range m.ResourceInformers { objects = append(objects, informer.getCachedObjects()...) @@ -200,7 +200,7 @@ func (m *monitor) Snapshot() []ObjectAndFilterResult { } // Sort objects by namespace and name - sort.Sort(ByNamespaceAndName(objects)) + sort.Sort(kemtypes.ByNamespaceAndName(objects)) return objects } diff --git a/pkg/kube_events_manager/monitor_config.go b/pkg/kube_events_manager/monitor_config.go index 08e87566..acc191ac 100644 --- a/pkg/kube_events_manager/monitor_config.go +++ b/pkg/kube_events_manager/monitor_config.go @@ -1,11 +1,11 @@ -package kube_events_manager +package kubeeventsmanager import ( "github.com/deckhouse/deckhouse/pkg/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" ) // MonitorConfig is a config that suits the latest @@ -17,49 +17,49 @@ type MonitorConfig struct { LogLabels map[string]string MetricLabels map[string]string } - EventTypes []WatchEventType + EventTypes []kemtypes.WatchEventType ApiVersion string Kind string - NameSelector *NameSelector - NamespaceSelector *NamespaceSelector + NameSelector *kemtypes.NameSelector + NamespaceSelector *kemtypes.NamespaceSelector LabelSelector *metav1.LabelSelector - FieldSelector *FieldSelector + FieldSelector *kemtypes.FieldSelector JqFilter string Logger *log.Logger - Mode KubeEventMode + Mode kemtypes.KubeEventMode KeepFullObjectsInMemory bool FilterFunc func(*unstructured.Unstructured) (interface{}, error) } -func (c *MonitorConfig) WithEventTypes(types []WatchEventType) *MonitorConfig { +func (c *MonitorConfig) WithEventTypes(types []kemtypes.WatchEventType) *MonitorConfig { if types == nil { - c.EventTypes = []WatchEventType{ - WatchEventAdded, - WatchEventModified, - WatchEventDeleted, + c.EventTypes = []kemtypes.WatchEventType{ + kemtypes.WatchEventAdded, + kemtypes.WatchEventModified, + kemtypes.WatchEventDeleted, } } else { - c.EventTypes = []WatchEventType{} + c.EventTypes = []kemtypes.WatchEventType{} c.EventTypes = append(c.EventTypes, types...) } return c } // WithNameSelector copies input NameSelector into monitor.NameSelector -func (c *MonitorConfig) WithNameSelector(nSel *NameSelector) { +func (c *MonitorConfig) WithNameSelector(nSel *kemtypes.NameSelector) { if nSel != nil { - c.NameSelector = &NameSelector{ + c.NameSelector = &kemtypes.NameSelector{ MatchNames: nSel.MatchNames, } } } // WithNamespaceSelector copies input NamespaceSelector into monitor.NamespaceSelector -func (c *MonitorConfig) WithNamespaceSelector(nsSel *NamespaceSelector) { +func (c *MonitorConfig) WithNamespaceSelector(nsSel *kemtypes.NamespaceSelector) { if nsSel != nil { - c.NamespaceSelector = &NamespaceSelector{} + c.NamespaceSelector = &kemtypes.NamespaceSelector{} if nsSel.NameSelector != nil { - c.NamespaceSelector.NameSelector = &NameSelector{ + c.NamespaceSelector.NameSelector = &kemtypes.NameSelector{ MatchNames: nsSel.NameSelector.MatchNames, } } @@ -73,9 +73,9 @@ func (c *MonitorConfig) WithNamespaceSelector(nsSel *NamespaceSelector) { } // WithFieldSelector copies input FieldSelector into monitor.FieldSelector -func (c *MonitorConfig) WithFieldSelector(fieldSel *FieldSelector) { +func (c *MonitorConfig) WithFieldSelector(fieldSel *kemtypes.FieldSelector) { if fieldSel != nil { - c.FieldSelector = &FieldSelector{ + c.FieldSelector = &kemtypes.FieldSelector{ MatchExpressions: fieldSel.MatchExpressions, } } @@ -131,9 +131,9 @@ func (c *MonitorConfig) namespaces() (nsNames []string) { return nsNames } -func (c *MonitorConfig) WithMode(mode KubeEventMode) { +func (c *MonitorConfig) WithMode(mode kemtypes.KubeEventMode) { if mode == "" { - c.Mode = ModeIncremental + c.Mode = kemtypes.ModeIncremental } c.Mode = mode } diff --git a/pkg/kube_events_manager/monitor_test.go b/pkg/kube_events_manager/monitor_test.go index b71d9a8b..6da8bdb4 100644 --- a/pkg/kube_events_manager/monitor_test.go +++ b/pkg/kube_events_manager/monitor_test.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "context" @@ -12,7 +12,7 @@ import ( "github.com/flant/kube-client/fake" "github.com/flant/kube-client/manifest" - . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" ) func Test_Monitor_should_handle_dynamic_ns_events(t *testing.T) { @@ -26,8 +26,8 @@ func Test_Monitor_should_handle_dynamic_ns_events(t *testing.T) { monitorCfg := &MonitorConfig{ ApiVersion: "v1", Kind: "ConfigMap", - EventTypes: []WatchEventType{WatchEventAdded, WatchEventModified, WatchEventDeleted}, - NamespaceSelector: &NamespaceSelector{ + EventTypes: []kemtypes.WatchEventType{kemtypes.WatchEventAdded, kemtypes.WatchEventModified, kemtypes.WatchEventDeleted}, + NamespaceSelector: &kemtypes.NamespaceSelector{ LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "test-label": "", @@ -37,7 +37,7 @@ func Test_Monitor_should_handle_dynamic_ns_events(t *testing.T) { } objsFromEvents := make([]string, 0) - mon := NewMonitor(context.Background(), fc.Client, nil, monitorCfg, func(ev KubeEvent) { + mon := NewMonitor(context.Background(), fc.Client, nil, monitorCfg, func(ev kemtypes.KubeEvent) { objsFromEvents = append(objsFromEvents, snapshotResourceIDs(ev.Objects)...) }, log.NewNop()) @@ -139,7 +139,7 @@ data: `, name) } -func snapshotResourceIDs(snap []ObjectAndFilterResult) []string { +func snapshotResourceIDs(snap []kemtypes.ObjectAndFilterResult) []string { ids := make([]string, 0) for _, obj := range snap { ids = append(ids, obj.Metadata.ResourceId) diff --git a/pkg/kube_events_manager/namespace_informer.go b/pkg/kube_events_manager/namespace_informer.go index 726cee94..6abe9151 100644 --- a/pkg/kube_events_manager/namespace_informer.go +++ b/pkg/kube_events_manager/namespace_informer.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager // Namespace manager monitor namespaces for onKubernetesEvent config. diff --git a/pkg/kube_events_manager/resource_informer.go b/pkg/kube_events_manager/resource_informer.go index 3f7d25c0..2ef1c70e 100644 --- a/pkg/kube_events_manager/resource_informer.go +++ b/pkg/kube_events_manager/resource_informer.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "context" @@ -15,8 +15,10 @@ import ( "k8s.io/client-go/tools/cache" klient "github.com/flant/kube-client/client" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" - "github.com/flant/shell-operator/pkg/metric_storage" + "github.com/flant/shell-operator/pkg/app" + "github.com/flant/shell-operator/pkg/filter/jq" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" "github.com/flant/shell-operator/pkg/utils/measure" ) @@ -35,7 +37,7 @@ type resourceInformer struct { ListOptions metav1.ListOptions // A cache of objects and filterResults. It is a part of the Monitor's snapshot. - cachedObjects map[string]*ObjectAndFilterResult + cachedObjects map[string]*kemtypes.ObjectAndFilterResult cacheLock sync.RWMutex // Cached objects operations since start @@ -45,18 +47,18 @@ type resourceInformer struct { // Events buffer for "Synchronization" mode: it stores events between CachedObjects call and enableKubeEventCb call // to replay them when "Synchronization" hook is done. - eventBuf []KubeEvent + eventBuf []kemtypes.KubeEvent eventBufLock sync.Mutex // A callback function that define custom handling of Kubernetes events. 
- eventCb func(KubeEvent) + eventCb func(kemtypes.KubeEvent) eventCbEnabled bool // TODO resourceInformer should be stoppable (think of deleted namespaces and disabled modules in addon-operator) ctx context.Context cancel context.CancelFunc - metricStorage *metric_storage.MetricStorage + metricStorage *metricstorage.MetricStorage // a flag to stop handle events after Stop() stopped bool @@ -67,8 +69,8 @@ type resourceInformer struct { // resourceInformer should implement ResourceInformer type resourceInformerConfig struct { client *klient.Client - mstor *metric_storage.MetricStorage - eventCb func(KubeEvent) + mstor *metricstorage.MetricStorage + eventCb func(kemtypes.KubeEvent) monitor *MonitorConfig logger *log.Logger @@ -83,7 +85,7 @@ func newResourceInformer(ns, name string, cfg *resourceInformerConfig) *resource Name: name, eventCb: cfg.eventCb, Monitor: cfg.monitor, - cachedObjects: make(map[string]*ObjectAndFilterResult), + cachedObjects: make(map[string]*kemtypes.ObjectAndFilterResult), cacheLock: sync.RWMutex{}, eventBufLock: sync.Mutex{}, cachedObjectsInfo: &CachedObjectsInfo{}, @@ -97,7 +99,7 @@ func (ei *resourceInformer) withContext(ctx context.Context) { ei.ctx, ei.cancel = context.WithCancel(ctx) } -func (ei *resourceInformer) putEvent(ev KubeEvent) { +func (ei *resourceInformer) putEvent(ev kemtypes.KubeEvent) { if ei.eventCb != nil { ei.eventCb(ev) } @@ -147,9 +149,9 @@ func (ei *resourceInformer) createSharedInformer() (err error) { } // Snapshot returns all cached objects for this informer -func (ei *resourceInformer) getCachedObjects() []ObjectAndFilterResult { +func (ei *resourceInformer) getCachedObjects() []kemtypes.ObjectAndFilterResult { ei.cacheLock.RLock() - res := make([]ObjectAndFilterResult, 0) + res := make([]kemtypes.ObjectAndFilterResult, 0) for _, obj := range ei.cachedObjects { res = append(res, *obj) } @@ -201,19 +203,20 @@ func (ei *resourceInformer) loadExistedObjects() error { // log.Debugf("%s: Got %d existing '%s' resources: %+v", ei.Monitor.Metadata.DebugName, len(objList.Items), ei.Monitor.Kind, objList.Items) log.Debugf("%s: '%s' initial list: Got %d existing resources", ei.Monitor.Metadata.DebugName, ei.Monitor.Kind, len(objList.Items)) - filteredObjects := make(map[string]*ObjectAndFilterResult) + filteredObjects := make(map[string]*kemtypes.ObjectAndFilterResult) for _, item := range objList.Items { // copy loop var to avoid duplication of pointer in filteredObjects obj := item - var objFilterRes *ObjectAndFilterResult + var objFilterRes *kemtypes.ObjectAndFilterResult var err error func() { defer measure.Duration(func(d time.Duration) { ei.metricStorage.HistogramObserve("{PREFIX}kube_jq_filter_duration_seconds", d.Seconds(), ei.Monitor.Metadata.MetricLabels, nil) })() - objFilterRes, err = applyFilter(ei.Monitor.JqFilter, ei.Monitor.FilterFunc, &obj) + filter := jq.NewFilter(app.JqLibraryPath) + objFilterRes, err = applyFilter(ei.Monitor.JqFilter, filter, ei.Monitor.FilterFunc, &obj) }() if err != nil { @@ -246,22 +249,22 @@ func (ei *resourceInformer) loadExistedObjects() error { } func (ei *resourceInformer) OnAdd(obj interface{}, _ bool) { - ei.handleWatchEvent(obj, WatchEventAdded) + ei.handleWatchEvent(obj, kemtypes.WatchEventAdded) } func (ei *resourceInformer) OnUpdate(_, newObj interface{}) { - ei.handleWatchEvent(newObj, WatchEventModified) + ei.handleWatchEvent(newObj, kemtypes.WatchEventModified) } func (ei *resourceInformer) OnDelete(obj interface{}) { - ei.handleWatchEvent(obj, WatchEventDeleted) + ei.handleWatchEvent(obj, 
kemtypes.WatchEventDeleted) } // HandleKubeEvent register object in cache. Pass object to callback if object's checksum is changed. // TODO refactor: pass KubeEvent as argument // TODO add delay to merge Added and Modified events (node added and then labels applied — one hook run on Added+Modified is enough) // func (ei *resourceInformer) HandleKubeEvent(obj *unstructured.Unstructured, objectId string, filterResult string, newChecksum string, eventType WatchEventType) { -func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType WatchEventType) { +func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType kemtypes.WatchEventType) { // check if stop if ei.stopped { log.Debugf("%s: received WATCH for a stopped %s/%s informer %s", @@ -286,13 +289,14 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType Watch // Always calculate checksum and update cache, because we need an actual state in ei.cachedObjects. - var objFilterRes *ObjectAndFilterResult + var objFilterRes *kemtypes.ObjectAndFilterResult var err error func() { defer measure.Duration(func(d time.Duration) { ei.metricStorage.HistogramObserve("{PREFIX}kube_jq_filter_duration_seconds", d.Seconds(), ei.Monitor.Metadata.MetricLabels, nil) })() - objFilterRes, err = applyFilter(ei.Monitor.JqFilter, ei.Monitor.FilterFunc, obj) + filter := jq.NewFilter(app.JqLibraryPath) + objFilterRes, err = applyFilter(ei.Monitor.JqFilter, filter, ei.Monitor.FilterFunc, obj) }() if err != nil { log.Errorf("%s: WATCH %s: %s", @@ -309,9 +313,9 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType Watch // Do not fire Added or Modified if object is in cache and its checksum is equal to the newChecksum. // Delete is always fired. switch eventType { - case WatchEventAdded: + case kemtypes.WatchEventAdded: fallthrough - case WatchEventModified: + case kemtypes.WatchEventModified: // Update object in cache ei.cacheLock.Lock() cachedObject, objectInCache := ei.cachedObjects[resourceId] @@ -328,7 +332,7 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType Watch ei.cachedObjects[resourceId] = objFilterRes // Update cached objects info. ei.cachedObjectsInfo.Count = uint64(len(ei.cachedObjects)) - if eventType == WatchEventAdded { + if eventType == kemtypes.WatchEventAdded { ei.cachedObjectsInfo.Added++ ei.cachedObjectsIncrement.Added++ } else { @@ -342,7 +346,7 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType Watch return } - case WatchEventDeleted: + case kemtypes.WatchEventDeleted: ei.cacheLock.Lock() delete(ei.cachedObjects, resourceId) // Update cached objects info. @@ -368,11 +372,11 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType Watch // TODO: should be disabled by default and enabled by a debug feature switch // log.Debugf("HandleKubeEvent: obj type is %T, value:\n%#v", obj, obj) - kubeEvent := KubeEvent{ - Type: TypeEvent, + kubeEvent := kemtypes.KubeEvent{ + Type: kemtypes.TypeEvent, MonitorId: ei.Monitor.Metadata.MonitorId, - WatchEvents: []WatchEventType{eventType}, - Objects: []ObjectAndFilterResult{*objFilterRes}, + WatchEvents: []kemtypes.WatchEventType{eventType}, + Objects: []kemtypes.ObjectAndFilterResult{*objFilterRes}, } // fix race with enableKubeEventCb. @@ -388,7 +392,7 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType Watch ei.eventBufLock.Lock() // Save event in buffer until the callback is enabled. 
if ei.eventBuf == nil { - ei.eventBuf = make([]KubeEvent, 0) + ei.eventBuf = make([]kemtypes.KubeEvent, 0) } ei.eventBuf = append(ei.eventBuf, kubeEvent) ei.eventBufLock.Unlock() @@ -396,24 +400,24 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType Watch } } -func (ei *resourceInformer) adjustFieldSelector(selector *FieldSelector, objName string) *FieldSelector { - var selectorCopy *FieldSelector +func (ei *resourceInformer) adjustFieldSelector(selector *kemtypes.FieldSelector, objName string) *kemtypes.FieldSelector { + var selectorCopy *kemtypes.FieldSelector if selector != nil { - selectorCopy = &FieldSelector{ + selectorCopy = &kemtypes.FieldSelector{ MatchExpressions: selector.MatchExpressions, } } if objName != "" { - objNameReq := FieldSelectorRequirement{ + objNameReq := kemtypes.FieldSelectorRequirement{ Field: "metadata.name", Operator: "=", Value: objName, } if selectorCopy == nil { - selectorCopy = &FieldSelector{ - MatchExpressions: []FieldSelectorRequirement{ + selectorCopy = &kemtypes.FieldSelector{ + MatchExpressions: []kemtypes.FieldSelectorRequirement{ objNameReq, }, } @@ -425,7 +429,7 @@ func (ei *resourceInformer) adjustFieldSelector(selector *FieldSelector, objName return selectorCopy } -func (ei *resourceInformer) shouldFireEvent(checkEvent WatchEventType) bool { +func (ei *resourceInformer) shouldFireEvent(checkEvent kemtypes.WatchEventType) bool { for _, event := range ei.Monitor.EventTypes { if event == checkEvent { return true diff --git a/pkg/kube_events_manager/util.go b/pkg/kube_events_manager/util.go index b4459d2e..fa5cd7b6 100644 --- a/pkg/kube_events_manager/util.go +++ b/pkg/kube_events_manager/util.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "fmt" @@ -9,7 +9,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/fields" - . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" ) // ResourceId describes object with namespace, kind and name @@ -28,7 +28,7 @@ func FormatLabelSelector(selector *metav1.LabelSelector) (string, error) { return res.String(), nil } -func FormatFieldSelector(selector *FieldSelector) (string, error) { +func FormatFieldSelector(selector *kemtypes.FieldSelector) (string, error) { if selector == nil || selector.MatchExpressions == nil { return "", nil } diff --git a/pkg/kube_events_manager/util_test.go b/pkg/kube_events_manager/util_test.go index 0356126e..ecb46e6a 100644 --- a/pkg/kube_events_manager/util_test.go +++ b/pkg/kube_events_manager/util_test.go @@ -1,4 +1,4 @@ -package kube_events_manager +package kubeeventsmanager import ( "fmt" diff --git a/pkg/metric/storage_test.go b/pkg/metric/storage_test.go index b7f907d8..083ef9ee 100644 --- a/pkg/metric/storage_test.go +++ b/pkg/metric/storage_test.go @@ -2,12 +2,12 @@ package metric_test import ( "github.com/flant/shell-operator/pkg/metric" - "github.com/flant/shell-operator/pkg/metric_storage" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" "github.com/flant/shell-operator/pkg/metric_storage/vault" ) var ( - _ metric.Storage = (*metric_storage.MetricStorage)(nil) + _ metric.Storage = (*metricstorage.MetricStorage)(nil) _ metric.Storage = (*metric.StorageMock)(nil) _ metric.GroupedStorage = (*vault.GroupedVault)(nil) diff --git a/pkg/metric_storage/metric_storage.go b/pkg/metric_storage/metric_storage.go index 3c74fae2..1ab1f0e0 100644 --- a/pkg/metric_storage/metric_storage.go +++ b/pkg/metric_storage/metric_storage.go @@ -1,4 +1,4 @@ -package metric_storage +package metricstorage import ( "context" diff --git a/pkg/schedule_manager/schedule_manager.go b/pkg/schedule_manager/schedule_manager.go index 6681b5bd..2ee10c42 100644 --- a/pkg/schedule_manager/schedule_manager.go +++ b/pkg/schedule_manager/schedule_manager.go @@ -1,4 +1,4 @@ -package schedule_manager +package schedulemanager import ( "context" @@ -6,14 +6,14 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "gopkg.in/robfig/cron.v2" - . "github.com/flant/shell-operator/pkg/schedule_manager/types" + smtypes "github.com/flant/shell-operator/pkg/schedule_manager/types" ) type ScheduleManager interface { Stop() Start() - Add(entry ScheduleEntry) - Remove(entry ScheduleEntry) + Add(entry smtypes.ScheduleEntry) + Remove(entry smtypes.ScheduleEntry) Ch() chan string } @@ -57,7 +57,7 @@ func (sm *scheduleManager) Stop() { // Add create entry for crontab and id and start scheduled function. // Crontab string should be validated with cron.Parse // function before pass to Add. 
-func (sm *scheduleManager) Add(newEntry ScheduleEntry) { +func (sm *scheduleManager) Add(newEntry smtypes.ScheduleEntry) { logEntry := sm.logger.With("operator.component", "scheduleManager") cronEntry, hasCronEntry := sm.Entries[newEntry.Crontab] @@ -88,7 +88,7 @@ func (sm *scheduleManager) Add(newEntry ScheduleEntry) { } } -func (sm *scheduleManager) Remove(delEntry ScheduleEntry) { +func (sm *scheduleManager) Remove(delEntry smtypes.ScheduleEntry) { cronEntry, hasCronEntry := sm.Entries[delEntry.Crontab] // Nothing to Remove diff --git a/pkg/schedule_manager/schedule_manager_test.go b/pkg/schedule_manager/schedule_manager_test.go index 21efbc2b..044b398a 100644 --- a/pkg/schedule_manager/schedule_manager_test.go +++ b/pkg/schedule_manager/schedule_manager_test.go @@ -1,4 +1,4 @@ -package schedule_manager +package schedulemanager import ( "context" diff --git a/pkg/shell-operator/bootstrap.go b/pkg/shell-operator/bootstrap.go index 760726a4..3f00f53f 100644 --- a/pkg/shell-operator/bootstrap.go +++ b/pkg/shell-operator/bootstrap.go @@ -10,10 +10,10 @@ import ( "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/config" "github.com/flant/shell-operator/pkg/debug" + "github.com/flant/shell-operator/pkg/filter/jq" "github.com/flant/shell-operator/pkg/hook" - "github.com/flant/shell-operator/pkg/jq" - "github.com/flant/shell-operator/pkg/kube_events_manager" - "github.com/flant/shell-operator/pkg/schedule_manager" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + schedulemanager "github.com/flant/shell-operator/pkg/schedule_manager" "github.com/flant/shell-operator/pkg/task/queue" utils "github.com/flant/shell-operator/pkg/utils/file" "github.com/flant/shell-operator/pkg/webhook/admission" @@ -29,7 +29,8 @@ func Init(logger *log.Logger) (*ShellOperator, error) { // Log version and jq filtering implementation. logger.Info(app.AppStartMessage) - logger.Debug(jq.FilterInfo()) + fl := jq.NewFilter(app.JqLibraryPath) + logger.Debug(fl.FilterInfo()) hooksDir, err := utils.RequireExistingDirectory(app.HooksDir) if err != nil { @@ -154,10 +155,10 @@ func (op *ShellOperator) SetupEventManagers() { op.TaskQueues.WithMetricStorage(op.MetricStorage) // Initialize schedule manager. - op.ScheduleManager = schedule_manager.NewScheduleManager(op.ctx, op.logger.Named("schedule-manager")) + op.ScheduleManager = schedulemanager.NewScheduleManager(op.ctx, op.logger.Named("schedule-manager")) // Initialize kubernetes events manager. - op.KubeEventsManager = kube_events_manager.NewKubeEventsManager(op.ctx, op.KubeClient, op.logger.Named("kube-events-manager")) + op.KubeEventsManager = kubeeventsmanager.NewKubeEventsManager(op.ctx, op.KubeClient, op.logger.Named("kube-events-manager")) op.KubeEventsManager.WithMetricStorage(op.MetricStorage) // Initialize events handler that emit tasks to run hooks diff --git a/pkg/shell-operator/combine_binding_context.go b/pkg/shell-operator/combine_binding_context.go index 7638f4db..5b05e10c 100644 --- a/pkg/shell-operator/combine_binding_context.go +++ b/pkg/shell-operator/combine_binding_context.go @@ -3,14 +3,14 @@ package shell_operator import ( "fmt" - . "github.com/flant/shell-operator/pkg/hook/binding_context" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" . 
"github.com/flant/shell-operator/pkg/hook/task_metadata" "github.com/flant/shell-operator/pkg/task" "github.com/flant/shell-operator/pkg/task/queue" ) type CombineResult struct { - BindingContexts []BindingContext + BindingContexts []bctx.BindingContext MonitorIDs []string } @@ -72,7 +72,7 @@ func (op *ShellOperator) combineBindingContextForHook(tqs *queue.TaskQueueSet, q } // Combine binding context and make a map to delete excess tasks - combinedContext := make([]BindingContext, 0) + combinedContext := make([]bctx.BindingContext, 0) monitorIDs := taskMeta.(MonitorIDAccessor).GetMonitorIDs() tasksFilter := make(map[string]bool) // current task always remain in queue @@ -96,7 +96,7 @@ func (op *ShellOperator) combineBindingContextForHook(tqs *queue.TaskQueueSet, q }) // group is used to compact binding contexts when only snapshots are needed - compactedContext := make([]BindingContext, 0) + compactedContext := make([]bctx.BindingContext, 0) for i := 0; i < len(combinedContext); i++ { keep := true diff --git a/pkg/shell-operator/combine_binding_context_test.go b/pkg/shell-operator/combine_binding_context_test.go index 6caef60b..b889aec7 100644 --- a/pkg/shell-operator/combine_binding_context_test.go +++ b/pkg/shell-operator/combine_binding_context_test.go @@ -7,10 +7,10 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" - "github.com/flant/shell-operator/pkg/hook/binding_context" + bindingcontext "github.com/flant/shell-operator/pkg/hook/binding_context" . "github.com/flant/shell-operator/pkg/hook/task_metadata" "github.com/flant/shell-operator/pkg/hook/types" - . "github.com/flant/shell-operator/pkg/kube_events_manager/types" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" "github.com/flant/shell-operator/pkg/task" "github.com/flant/shell-operator/pkg/task/queue" ) @@ -31,10 +31,10 @@ func Test_CombineBindingContext_MultipleHooks(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -42,10 +42,10 @@ func Test_CombineBindingContext_MultipleHooks(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -53,7 +53,7 @@ func Test_CombineBindingContext_MultipleHooks(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "schedule", }, @@ -63,10 +63,10 @@ func Test_CombineBindingContext_MultipleHooks(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -74,10 +74,10 @@ func Test_CombineBindingContext_MultipleHooks(t *testing.T) { WithQueueName("test_multiple_hooks"). 
WithMetadata(HookMetadata{ HookName: "hook2.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -85,10 +85,10 @@ func Test_CombineBindingContext_MultipleHooks(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -125,10 +125,10 @@ func Test_CombineBindingContext_Nil_On_NoCombine(t *testing.T) { WithQueueName("test_no_combine"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -136,10 +136,10 @@ func Test_CombineBindingContext_Nil_On_NoCombine(t *testing.T) { WithQueueName("test_no_combine"). WithMetadata(HookMetadata{ HookName: "hook2.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -147,7 +147,7 @@ func Test_CombineBindingContext_Nil_On_NoCombine(t *testing.T) { WithQueueName("test_no_combine"). WithMetadata(HookMetadata{ HookName: "hook3.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "schedule", }, @@ -179,7 +179,7 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { } }) - bcMeta := binding_context.BindingContext{}.Metadata + bcMeta := bindingcontext.BindingContext{}.Metadata bcMeta.Group = "pods" tasks := []task.Task{ @@ -188,11 +188,11 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Metadata: bcMeta, Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -200,11 +200,11 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Metadata: bcMeta, Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -212,7 +212,7 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Metadata: bcMeta, Binding: "schedule", @@ -223,10 +223,10 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -235,10 +235,10 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { WithQueueName("test_multiple_hooks"). 
WithMetadata(HookMetadata{ HookName: "hook2.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -246,10 +246,10 @@ func Test_CombineBindingContext_Group_Compaction(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -281,11 +281,11 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { } }) - bcMeta := binding_context.BindingContext{}.Metadata + bcMeta := bindingcontext.BindingContext{}.Metadata bcMeta.Group = "pods" bcMeta.BindingType = types.OnKubernetesEvent - schMeta := binding_context.BindingContext{}.Metadata + schMeta := bindingcontext.BindingContext{}.Metadata schMeta.Group = "pods" schMeta.BindingType = types.Schedule @@ -294,11 +294,11 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Metadata: bcMeta, Binding: "kubernetes", - Type: TypeSynchronization, + Type: kemtypes.TypeSynchronization, }, }, }), @@ -306,11 +306,11 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Metadata: bcMeta, Binding: "kubernetes2", - Type: TypeSynchronization, + Type: kemtypes.TypeSynchronization, }, }, }), @@ -318,7 +318,7 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Metadata: bcMeta, Binding: "schedule", @@ -327,15 +327,15 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { }), // stop compaction for group - // bcList[1] type == TypeEvent + // bcList[1] type == kemtypes.TypeEvent task.NewTask(HookRun). WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -344,7 +344,7 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Metadata: schMeta, Binding: "schedule", @@ -356,10 +356,10 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { - Metadata: func() binding_context.BindingContext { - bc := binding_context.BindingContext{} + Metadata: func() bindingcontext.BindingContext { + bc := bindingcontext.BindingContext{} bc.Metadata.BindingType = types.Schedule return bc }().Metadata, @@ -372,11 +372,11 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { WithQueueName("test_multiple_hooks"). 
WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Metadata: bcMeta, Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -385,10 +385,10 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -396,10 +396,10 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { WithQueueName("test_multiple_hooks"). WithMetadata(HookMetadata{ HookName: "hook1.sh", - BindingContext: []binding_context.BindingContext{ + BindingContext: []bindingcontext.BindingContext{ { Binding: "kubernetes", - Type: TypeEvent, + Type: kemtypes.TypeEvent, }, }, }), @@ -424,13 +424,13 @@ func Test_CombineBindingContext_Group_Type(t *testing.T) { g.Expect(string(bcList[0].Type)).Should(Equal(""), "bc: %+v", bcList[0]) g.Expect(bcList[0].Metadata.Group).Should(Equal("pods"), "bc: %+v", bcList[0]) - g.Expect(bcList[1].Type).Should(Equal(TypeEvent)) + g.Expect(bcList[1].Type).Should(Equal(kemtypes.TypeEvent)) g.Expect(string(bcList[2].Type)).Should(Equal("")) g.Expect(bcList[2].Metadata.Group).Should(Equal("pods"), "bc: %+v", bcList[2]) g.Expect(string(bcList[3].Type)).Should(Equal("")) - g.Expect(bcList[4].Type).Should(Equal(TypeEvent)) + g.Expect(bcList[4].Type).Should(Equal(kemtypes.TypeEvent)) g.Expect(bcList[4].Metadata.Group).Should(Equal("pods"), "bc: %+v", bcList[4]) } diff --git a/pkg/shell-operator/kube_client.go b/pkg/shell-operator/kube_client.go index 733341ac..91b1cd35 100644 --- a/pkg/shell-operator/kube_client.go +++ b/pkg/shell-operator/kube_client.go @@ -7,8 +7,8 @@ import ( klient "github.com/flant/kube-client/client" "github.com/flant/shell-operator/pkg/app" - "github.com/flant/shell-operator/pkg/kube/object_patch" - "github.com/flant/shell-operator/pkg/metric_storage" + objectpatch "github.com/flant/shell-operator/pkg/kube/object_patch" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" utils "github.com/flant/shell-operator/pkg/utils/labels" ) @@ -19,7 +19,7 @@ var ( // defaultMainKubeClient creates a Kubernetes client for hooks. No timeout specified, because // timeout will reset connections for Watchers. 
-func defaultMainKubeClient(metricStorage *metric_storage.MetricStorage, metricLabels map[string]string, logger *log.Logger) *klient.Client { +func defaultMainKubeClient(metricStorage *metricstorage.MetricStorage, metricLabels map[string]string, logger *log.Logger) *klient.Client { client := klient.New(klient.WithLogger(logger)) client.WithContextName(app.KubeContext) client.WithConfigPath(app.KubeConfig) @@ -29,7 +29,7 @@ func defaultMainKubeClient(metricStorage *metric_storage.MetricStorage, metricLa return client } -func initDefaultMainKubeClient(metricStorage *metric_storage.MetricStorage, logger *log.Logger) (*klient.Client, error) { +func initDefaultMainKubeClient(metricStorage *metricstorage.MetricStorage, logger *log.Logger) (*klient.Client, error) { //nolint:staticcheck klient.RegisterKubernetesClientMetrics(metricStorage, defaultMainKubeClientMetricLabels) kubeClient := defaultMainKubeClient(metricStorage, defaultMainKubeClientMetricLabels, logger.Named("main-kube-client")) @@ -41,7 +41,7 @@ func initDefaultMainKubeClient(metricStorage *metric_storage.MetricStorage, logg } // defaultObjectPatcherKubeClient initializes a Kubernetes client for ObjectPatcher. Timeout is specified here. -func defaultObjectPatcherKubeClient(metricStorage *metric_storage.MetricStorage, metricLabels map[string]string, logger *log.Logger) *klient.Client { +func defaultObjectPatcherKubeClient(metricStorage *metricstorage.MetricStorage, metricLabels map[string]string, logger *log.Logger) *klient.Client { client := klient.New(klient.WithLogger(logger)) client.WithContextName(app.KubeContext) client.WithConfigPath(app.KubeConfig) @@ -52,11 +52,11 @@ func defaultObjectPatcherKubeClient(metricStorage *metric_storage.MetricStorage, return client } -func initDefaultObjectPatcher(metricStorage *metric_storage.MetricStorage, logger *log.Logger) (*object_patch.ObjectPatcher, error) { +func initDefaultObjectPatcher(metricStorage *metricstorage.MetricStorage, logger *log.Logger) (*objectpatch.ObjectPatcher, error) { patcherKubeClient := defaultObjectPatcherKubeClient(metricStorage, defaultObjectPatcherKubeClientMetricLabels, logger.Named("object-patcher-kube-client")) err := patcherKubeClient.Init() if err != nil { return nil, fmt.Errorf("initialize Kubernetes client for Object patcher: %s\n", err) } - return object_patch.NewObjectPatcher(patcherKubeClient, logger), nil + return objectpatch.NewObjectPatcher(patcherKubeClient, logger), nil } diff --git a/pkg/shell-operator/manager_events_handler.go b/pkg/shell-operator/manager_events_handler.go index 643c8de0..21f37be7 100644 --- a/pkg/shell-operator/manager_events_handler.go +++ b/pkg/shell-operator/manager_events_handler.go @@ -5,17 +5,17 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" - "github.com/flant/shell-operator/pkg/kube_events_manager" - . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" - "github.com/flant/shell-operator/pkg/schedule_manager" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" + schedulemanager "github.com/flant/shell-operator/pkg/schedule_manager" "github.com/flant/shell-operator/pkg/task" "github.com/flant/shell-operator/pkg/task/queue" ) type managerEventsHandlerConfig struct { tqs *queue.TaskQueueSet - mgr kube_events_manager.KubeEventsManager - smgr schedule_manager.ScheduleManager + mgr kubeeventsmanager.KubeEventsManager + smgr schedulemanager.ScheduleManager logger *log.Logger } @@ -24,10 +24,10 @@ type ManagerEventsHandler struct { ctx context.Context cancel context.CancelFunc - kubeEventsManager kube_events_manager.KubeEventsManager - scheduleManager schedule_manager.ScheduleManager + kubeEventsManager kubeeventsmanager.KubeEventsManager + scheduleManager schedulemanager.ScheduleManager - kubeEventCb func(kubeEvent KubeEvent) []task.Task + kubeEventCb func(kubeEvent kemtypes.KubeEvent) []task.Task scheduleCb func(crontab string) []task.Task taskQueues *queue.TaskQueueSet @@ -53,7 +53,7 @@ func newManagerEventsHandler(ctx context.Context, cfg *managerEventsHandlerConfi // WithKubeEventHandler sets custom function for event handling. // This function is used inside addon-operator. -func (m *ManagerEventsHandler) WithKubeEventHandler(fn func(kubeEvent KubeEvent) []task.Task) { +func (m *ManagerEventsHandler) WithKubeEventHandler(fn func(kubeEvent kemtypes.KubeEvent) []task.Task) { m.kubeEventCb = fn } diff --git a/pkg/shell-operator/metrics_hooks.go b/pkg/shell-operator/metrics_hooks.go index 24e2bfa5..e0f448ad 100644 --- a/pkg/shell-operator/metrics_hooks.go +++ b/pkg/shell-operator/metrics_hooks.go @@ -4,11 +4,11 @@ import ( "net/http" "github.com/flant/shell-operator/pkg/app" - "github.com/flant/shell-operator/pkg/metric_storage" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" ) func (op *ShellOperator) setupHookMetricStorage() { - metricStorage := metric_storage.NewMetricStorage(op.ctx, app.PrometheusMetricsPrefix, true, op.logger.Named("metric-storage")) + metricStorage := metricstorage.NewMetricStorage(op.ctx, app.PrometheusMetricsPrefix, true, op.logger.Named("metric-storage")) op.APIServer.RegisterRoute(http.MethodGet, "/metrics/hooks", metricStorage.Handler().ServeHTTP) // create new metric storage for hooks @@ -17,7 +17,7 @@ func (op *ShellOperator) setupHookMetricStorage() { } // specific metrics for shell-operator HookManager -func registerHookMetrics(metricStorage *metric_storage.MetricStorage) { +func registerHookMetrics(metricStorage *metricstorage.MetricStorage) { // Metrics for enable kubernetes bindings. 
metricStorage.RegisterGauge("{PREFIX}hook_enable_kubernetes_bindings_seconds", map[string]string{"hook": ""}) metricStorage.RegisterCounter("{PREFIX}hook_enable_kubernetes_bindings_errors_total", map[string]string{"hook": ""}) diff --git a/pkg/shell-operator/metrics_operator.go b/pkg/shell-operator/metrics_operator.go index 021335a9..3ce48558 100644 --- a/pkg/shell-operator/metrics_operator.go +++ b/pkg/shell-operator/metrics_operator.go @@ -4,12 +4,12 @@ import ( "net/http" "github.com/flant/shell-operator/pkg/app" - "github.com/flant/shell-operator/pkg/metric_storage" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" ) // setupMetricStorage creates and initializes metrics storage for built-in operator metrics func (op *ShellOperator) setupMetricStorage(kubeEventsManagerLabels map[string]string) { - metricStorage := metric_storage.NewMetricStorage(op.ctx, app.PrometheusMetricsPrefix, false, op.logger.Named("metric-storage")) + metricStorage := metricstorage.NewMetricStorage(op.ctx, app.PrometheusMetricsPrefix, false, op.logger.Named("metric-storage")) registerCommonMetrics(metricStorage) registerTaskQueueMetrics(metricStorage) @@ -23,13 +23,13 @@ func (op *ShellOperator) setupMetricStorage(kubeEventsManagerLabels map[string]s // registerCommonMetrics register base metric // This function is used in the addon-operator -func registerCommonMetrics(metricStorage *metric_storage.MetricStorage) { +func registerCommonMetrics(metricStorage *metricstorage.MetricStorage) { metricStorage.RegisterCounter("{PREFIX}live_ticks", map[string]string{}) } // registerTaskQueueMetrics // This function is used in the addon-operator -func registerTaskQueueMetrics(metricStorage *metric_storage.MetricStorage) { +func registerTaskQueueMetrics(metricStorage *metricstorage.MetricStorage) { metricStorage.RegisterHistogram( "{PREFIX}tasks_queue_action_duration_seconds", map[string]string{ @@ -50,7 +50,7 @@ func registerTaskQueueMetrics(metricStorage *metric_storage.MetricStorage) { // registerKubeEventsManagerMetrics registers metrics for kube_event_manager // This function is used in the addon-operator -func registerKubeEventsManagerMetrics(metricStorage *metric_storage.MetricStorage, labels map[string]string) { +func registerKubeEventsManagerMetrics(metricStorage *metricstorage.MetricStorage, labels map[string]string) { // Count of objects in snapshot for one kubernets bindings. metricStorage.RegisterGauge("{PREFIX}kube_snapshot_objects", labels) // Duration of jqFilter applying. 
diff --git a/pkg/shell-operator/operator.go b/pkg/shell-operator/operator.go index 73306142..81120506 100644 --- a/pkg/shell-operator/operator.go +++ b/pkg/shell-operator/operator.go @@ -11,15 +11,15 @@ import ( klient "github.com/flant/kube-client/client" "github.com/flant/shell-operator/pkg/hook" - "github.com/flant/shell-operator/pkg/hook/binding_context" + bindingcontext "github.com/flant/shell-operator/pkg/hook/binding_context" "github.com/flant/shell-operator/pkg/hook/controller" "github.com/flant/shell-operator/pkg/hook/task_metadata" "github.com/flant/shell-operator/pkg/hook/types" - "github.com/flant/shell-operator/pkg/kube/object_patch" - "github.com/flant/shell-operator/pkg/kube_events_manager" + objectpatch "github.com/flant/shell-operator/pkg/kube/object_patch" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" kemTypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" - "github.com/flant/shell-operator/pkg/metric_storage" - "github.com/flant/shell-operator/pkg/schedule_manager" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" + schedulemanager "github.com/flant/shell-operator/pkg/schedule_manager" "github.com/flant/shell-operator/pkg/task" "github.com/flant/shell-operator/pkg/task/queue" utils "github.com/flant/shell-operator/pkg/utils/labels" @@ -40,14 +40,14 @@ type ShellOperator struct { APIServer *baseHTTPServer // MetricStorage collects and store metrics for built-in operator primitives, hook execution - MetricStorage *metric_storage.MetricStorage + MetricStorage *metricstorage.MetricStorage // HookMetricStorage separate metric storage for metrics, which are returned by user hooks - HookMetricStorage *metric_storage.MetricStorage + HookMetricStorage *metricstorage.MetricStorage KubeClient *klient.Client - ObjectPatcher *object_patch.ObjectPatcher + ObjectPatcher *objectpatch.ObjectPatcher - ScheduleManager schedule_manager.ScheduleManager - KubeEventsManager kube_events_manager.KubeEventsManager + ScheduleManager schedulemanager.ScheduleManager + KubeEventsManager kubeeventsmanager.KubeEventsManager TaskQueues *queue.TaskQueueSet @@ -622,12 +622,12 @@ func (op *ShellOperator) handleRunHook(t task.Task, taskHook *hook.Hook, hookMet result, err := taskHook.Run(hookMeta.BindingType, hookMeta.BindingContext, hookLogLabels) if err != nil { if result != nil && len(result.KubernetesPatchBytes) > 0 { - operations, patchStatusErr := object_patch.ParseOperations(result.KubernetesPatchBytes) + operations, patchStatusErr := objectpatch.ParseOperations(result.KubernetesPatchBytes) if patchStatusErr != nil { return fmt.Errorf("%s: couldn't patch status: %s", err, patchStatusErr) } - patchStatusErr = op.ObjectPatcher.ExecuteOperations(object_patch.GetPatchStatusOperationsOnHookError(operations)) + patchStatusErr = op.ObjectPatcher.ExecuteOperations(objectpatch.GetPatchStatusOperationsOnHookError(operations)) if patchStatusErr != nil { return fmt.Errorf("%s: couldn't patch status: %s", err, patchStatusErr) } @@ -644,7 +644,7 @@ func (op *ShellOperator) handleRunHook(t task.Task, taskHook *hook.Hook, hookMet // Try to apply Kubernetes actions. 
if len(result.KubernetesPatchBytes) > 0 { - operations, err := object_patch.ParseOperations(result.KubernetesPatchBytes) + operations, err := objectpatch.ParseOperations(result.KubernetesPatchBytes) if err != nil { return err } @@ -735,7 +735,7 @@ func (op *ShellOperator) CombineBindingContextForHook(q *queue.TaskQueue, t task } // Combine binding context and make a map to delete excess tasks - combinedContext := make([]binding_context.BindingContext, 0) + combinedContext := make([]bindingcontext.BindingContext, 0) monitorIDs := taskMeta.(task_metadata.MonitorIDAccessor).GetMonitorIDs() tasksFilter := make(map[string]bool) // current task always remain in queue @@ -759,7 +759,7 @@ func (op *ShellOperator) CombineBindingContextForHook(q *queue.TaskQueue, t task }) // group is used to compact binding contexts when only snapshots are needed - compactedContext := make([]binding_context.BindingContext, 0) + compactedContext := make([]bindingcontext.BindingContext, 0) for i := 0; i < len(combinedContext); i++ { keep := true @@ -807,7 +807,7 @@ func (op *ShellOperator) bootstrapMainQueue(tqs *queue.TaskQueueSet) { } for _, hookName := range onStartupHooks { - bc := binding_context.BindingContext{ + bc := bindingcontext.BindingContext{ Binding: string(types.OnStartup), } bc.Metadata.BindingType = types.OnStartup @@ -816,7 +816,7 @@ func (op *ShellOperator) bootstrapMainQueue(tqs *queue.TaskQueueSet) { WithMetadata(task_metadata.HookMetadata{ HookName: hookName, BindingType: types.OnStartup, - BindingContext: []binding_context.BindingContext{bc}, + BindingContext: []bindingcontext.BindingContext{bc}, }). WithQueuedAt(time.Now()) mainQueue.AddLast(newTask) diff --git a/pkg/shell-operator/operator_test.go b/pkg/shell-operator/operator_test.go index 5e8ee610..ea94cb7c 100644 --- a/pkg/shell-operator/operator_test.go +++ b/pkg/shell-operator/operator_test.go @@ -8,7 +8,7 @@ import ( . "github.com/onsi/gomega" . "github.com/flant/shell-operator/pkg/hook/task_metadata" - . "github.com/flant/shell-operator/pkg/hook/types" + htypes "github.com/flant/shell-operator/pkg/hook/types" "github.com/flant/shell-operator/pkg/task" utils "github.com/flant/shell-operator/pkg/utils/file" ) @@ -30,16 +30,16 @@ func Test_Operator_startup_tasks(t *testing.T) { expectTasks := []struct { taskType task.TaskType - bindingType BindingType + bindingType htypes.BindingType hookPrefix string }{ // OnStartup in specified order. // onStartup: 1 - {HookRun, OnStartup, "hook02"}, + {HookRun, htypes.OnStartup, "hook02"}, // onStartup: 10 - {HookRun, OnStartup, "hook03"}, + {HookRun, htypes.OnStartup, "hook03"}, // onStartup: 20 - {HookRun, OnStartup, "hook01"}, + {HookRun, htypes.OnStartup, "hook01"}, // EnableKubernetes and EnableSchedule in alphabet order. 
{EnableKubernetesBindings, "", "hook01"}, {EnableScheduleBindings, "", "hook02"}, diff --git a/pkg/task/queue/queue_set.go b/pkg/task/queue/queue_set.go index 5c876f25..00113eea 100644 --- a/pkg/task/queue/queue_set.go +++ b/pkg/task/queue/queue_set.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/flant/shell-operator/pkg/metric_storage" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" "github.com/flant/shell-operator/pkg/task" ) @@ -15,7 +15,7 @@ const MainQueueName = "main" type TaskQueueSet struct { MainName string - metricStorage *metric_storage.MetricStorage + metricStorage *metricstorage.MetricStorage ctx context.Context cancel context.CancelFunc @@ -40,7 +40,7 @@ func (tqs *TaskQueueSet) WithContext(ctx context.Context) { tqs.ctx, tqs.cancel = context.WithCancel(ctx) } -func (tqs *TaskQueueSet) WithMetricStorage(mstor *metric_storage.MetricStorage) { +func (tqs *TaskQueueSet) WithMetricStorage(mstor *metricstorage.MetricStorage) { tqs.metricStorage = mstor } diff --git a/pkg/task/queue/task_queue.go b/pkg/task/queue/task_queue.go index 32019550..d153c0b2 100644 --- a/pkg/task/queue/task_queue.go +++ b/pkg/task/queue/task_queue.go @@ -10,7 +10,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" - "github.com/flant/shell-operator/pkg/metric_storage" + metricstorage "github.com/flant/shell-operator/pkg/metric_storage" "github.com/flant/shell-operator/pkg/task" "github.com/flant/shell-operator/pkg/utils/exponential_backoff" "github.com/flant/shell-operator/pkg/utils/measure" @@ -56,7 +56,7 @@ type TaskResult struct { type TaskQueue struct { m sync.RWMutex - metricStorage *metric_storage.MetricStorage + metricStorage *metricstorage.MetricStorage ctx context.Context cancel context.CancelFunc @@ -101,7 +101,7 @@ func (q *TaskQueue) WithContext(ctx context.Context) { q.ctx, q.cancel = context.WithCancel(ctx) } -func (q *TaskQueue) WithMetricStorage(mstor *metric_storage.MetricStorage) { +func (q *TaskQueue) WithMetricStorage(mstor *metricstorage.MetricStorage) { q.metricStorage = mstor } diff --git a/test/hook/context/context_combiner.go b/test/hook/context/context_combiner.go index 62b30f25..3c240b41 100644 --- a/test/hook/context/context_combiner.go +++ b/test/hook/context/context_combiner.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/flant/shell-operator/pkg/hook/binding_context" + bindingcontext "github.com/flant/shell-operator/pkg/hook/binding_context" "github.com/flant/shell-operator/pkg/hook/controller" "github.com/flant/shell-operator/pkg/hook/task_metadata" "github.com/flant/shell-operator/pkg/hook/types" @@ -54,10 +54,10 @@ func (c *ContextCombiner) AddBindingContext(bindingType types.BindingType, info // CombinedContext returns a combined context or a binding context // from the first task. -func (c *ContextCombiner) Combined() []binding_context.BindingContext { +func (c *ContextCombiner) Combined() []bindingcontext.BindingContext { firstTask := c.q.GetFirst() if firstTask == nil { - return []binding_context.BindingContext{} + return []bindingcontext.BindingContext{} } taskMeta := firstTask.GetMetadata() @@ -81,11 +81,11 @@ func (c *ContextCombiner) QueueLen() int { return c.q.Length() } -func ConvertToGeneratedBindingContexts(bindingContexts []binding_context.BindingContext) (GeneratedBindingContexts, error) { +func ConvertToGeneratedBindingContexts(bindingContexts []bindingcontext.BindingContext) (GeneratedBindingContexts, error) { res := GeneratedBindingContexts{} // Support only v1 binding contexts. 
- bcList := binding_context.ConvertBindingContextList("v1", bindingContexts) + bcList := bindingcontext.ConvertBindingContextList("v1", bindingContexts) data, err := bcList.Json() if err != nil { return res, fmt.Errorf("marshaling binding context error: %v", err) diff --git a/test/hook/context/generator.go b/test/hook/context/generator.go index a786237e..4fe30dd5 100644 --- a/test/hook/context/generator.go +++ b/test/hook/context/generator.go @@ -9,8 +9,9 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/kube-client/fake" + "github.com/flant/shell-operator/pkg/app" "github.com/flant/shell-operator/pkg/hook" - . "github.com/flant/shell-operator/pkg/hook/binding_context" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" "github.com/flant/shell-operator/pkg/hook/controller" "github.com/flant/shell-operator/pkg/hook/types" kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" @@ -23,7 +24,7 @@ func init() { type GeneratedBindingContexts struct { Rendered string - BindingContexts []BindingContext + BindingContexts []bctx.BindingContext } type BindingContextController struct { @@ -102,7 +103,7 @@ func (b *BindingContextController) Run(initialState string) (GeneratedBindingCon } if b.Hook == nil { - testHook := hook.NewHook("test", "test", b.logger.Named("hook")) + testHook := hook.NewHook("test", "test", app.DebugKeepTmpFiles, app.LogProxyHookJSON, app.ProxyJsonLogKey, b.logger.Named("hook")) testHook, err = testHook.LoadConfig([]byte(b.HookConfig)) if err != nil { return GeneratedBindingContexts{}, fmt.Errorf("couldn't load or validate hook configuration: %v", err) @@ -181,14 +182,14 @@ func (b *BindingContextController) RunBindingWithAllSnapshots(binding types.Bind b.mu.Lock() defer b.mu.Unlock() - bc := BindingContext{ + bc := bctx.BindingContext{ Binding: string(binding), Snapshots: b.HookCtrl.KubernetesSnapshots(), } bc.Metadata.BindingType = binding bc.Metadata.IncludeAllSnapshots = true - return ConvertToGeneratedBindingContexts([]BindingContext{bc}) + return ConvertToGeneratedBindingContexts([]bctx.BindingContext{bc}) } func (b *BindingContextController) Stop() { diff --git a/test/hook/context/generator_test.go b/test/hook/context/generator_test.go index 09028938..88a9c52a 100644 --- a/test/hook/context/generator_test.go +++ b/test/hook/context/generator_test.go @@ -7,11 +7,11 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" . "github.com/onsi/gomega" - . "github.com/flant/shell-operator/pkg/hook/binding_context" + bctx "github.com/flant/shell-operator/pkg/hook/binding_context" ) -func parseContexts(contexts string) []BindingContext { - var parsedBindingContexts []BindingContext +func parseContexts(contexts string) []bctx.BindingContext { + var parsedBindingContexts []bctx.BindingContext _ = json.Unmarshal([]byte(contexts), &parsedBindingContexts) return parsedBindingContexts } diff --git a/test/integration/kube_event_manager/kube_event_manager_test.go b/test/integration/kube_event_manager/kube_event_manager_test.go index 51d1ecdd..b2813d44 100644 --- a/test/integration/kube_event_manager/kube_event_manager_test.go +++ b/test/integration/kube_event_manager/kube_event_manager_test.go @@ -13,8 +13,8 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/shell-operator/pkg/app" - "github.com/flant/shell-operator/pkg/kube_events_manager" - . 
"github.com/flant/shell-operator/pkg/kube_events_manager/types" + kubeeventsmanager "github.com/flant/shell-operator/pkg/kube_events_manager" + kemtypes "github.com/flant/shell-operator/pkg/kube_events_manager/types" . "github.com/flant/shell-operator/test/integration/suite" testutils "github.com/flant/shell-operator/test/utils" ) @@ -24,25 +24,25 @@ func Test(t *testing.T) { } var _ = Describe("Binding 'kubernetes' with kind 'Pod' should emit KubeEvent objects", func() { - var KubeEventsManager kube_events_manager.KubeEventsManager + var KubeEventsManager kubeeventsmanager.KubeEventsManager BeforeEach(func() { - KubeEventsManager = kube_events_manager.NewKubeEventsManager(context.Background(), KubeClient, log.NewNop()) + KubeEventsManager = kubeeventsmanager.NewKubeEventsManager(context.Background(), KubeClient, log.NewNop()) }) Context("with configVersion: v1", func() { - var monitorConfig *kube_events_manager.MonitorConfig + var monitorConfig *kubeeventsmanager.MonitorConfig BeforeEach(func() { - monitorConfig = &kube_events_manager.MonitorConfig{ + monitorConfig = &kubeeventsmanager.MonitorConfig{ Kind: "Pod", ApiVersion: "v1", KeepFullObjectsInMemory: true, - EventTypes: []WatchEventType{ - WatchEventAdded, + EventTypes: []kemtypes.WatchEventType{ + kemtypes.WatchEventAdded, }, - NamespaceSelector: &NamespaceSelector{ - NameSelector: &NameSelector{ + NamespaceSelector: &kemtypes.NamespaceSelector{ + NameSelector: &kemtypes.NameSelector{ MatchNames: []string{"default"}, }, }, diff --git a/test/integration/kubeclient/object_patch_test.go b/test/integration/kubeclient/object_patch_test.go index 22182317..95af445f 100644 --- a/test/integration/kubeclient/object_patch_test.go +++ b/test/integration/kubeclient/object_patch_test.go @@ -7,6 +7,8 @@ import ( "context" "encoding/json" + objectpatch "github.com/flant/shell-operator/pkg/kube/object_patch" + . "github.com/flant/shell-operator/test/integration/suite" uuid "github.com/gofrs/uuid/v5" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -16,9 +18,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/yaml" - - "github.com/flant/shell-operator/pkg/kube/object_patch" - . 
"github.com/flant/shell-operator/test/integration/suite" ) const ( @@ -65,7 +64,7 @@ var _ = Describe("Kubernetes API object patching", func() { }) It("should fail to Create() an object if it already exists", func() { - err := ObjectPatcher.ExecuteOperation(object_patch.NewCreateOperation(unstructuredCM)) + err := ObjectPatcher.ExecuteOperation(objectpatch.NewCreateOperation(unstructuredCM)) Expect(err).To(Not(Succeed())) }) @@ -79,7 +78,7 @@ var _ = Describe("Kubernetes API object patching", func() { unstructuredNewTestCM, err := generateUnstructured(newTestCM) Expect(err).To(Succeed()) - err = ObjectPatcher.ExecuteOperation(object_patch.NewCreateOperation(unstructuredNewTestCM, object_patch.UpdateIfExists())) + err = ObjectPatcher.ExecuteOperation(objectpatch.NewCreateOperation(unstructuredNewTestCM, objectpatch.UpdateIfExists())) Expect(err).To(Succeed()) cm, err := KubeClient.CoreV1().ConfigMaps(testCM.Namespace).Get(context.TODO(), newTestCM.Name, metav1.GetOptions{}) @@ -94,7 +93,7 @@ var _ = Describe("Kubernetes API object patching", func() { unstructuredSeparateTestCM, err := generateUnstructured(separateTestCM) Expect(err).To(Succeed()) - err = ObjectPatcher.ExecuteOperation(object_patch.NewCreateOperation(unstructuredSeparateTestCM, object_patch.UpdateIfExists())) + err = ObjectPatcher.ExecuteOperation(objectpatch.NewCreateOperation(unstructuredSeparateTestCM, objectpatch.UpdateIfExists())) Expect(err).To(Succeed()) _, err = KubeClient.CoreV1().ConfigMaps(testCM.Namespace).Get(context.TODO(), separateTestCM.Name, metav1.GetOptions{}) @@ -118,9 +117,9 @@ var _ = Describe("Kubernetes API object patching", func() { }) It("should successfully delete an object", func() { - err := ObjectPatcher.ExecuteOperation(object_patch.NewDeleteOperation( + err := ObjectPatcher.ExecuteOperation(objectpatch.NewDeleteOperation( testCM.APIVersion, testCM.Kind, testCM.Namespace, testCM.Name, - object_patch.InBackground())) + objectpatch.InBackground())) Expect(err).Should(Succeed()) _, err = KubeClient.CoreV1().ConfigMaps(testCM.Namespace).Get(context.TODO(), testCM.Name, metav1.GetOptions{}) @@ -128,7 +127,7 @@ var _ = Describe("Kubernetes API object patching", func() { }) It("should successfully delete an object if it doesn't exist", func() { - err := ObjectPatcher.ExecuteOperation(object_patch.NewDeleteOperation(testCM.APIVersion, testCM.Kind, testCM.Namespace, testCM.Name)) + err := ObjectPatcher.ExecuteOperation(objectpatch.NewDeleteOperation(testCM.APIVersion, testCM.Kind, testCM.Namespace, testCM.Name)) Expect(err).Should(Succeed()) }) }) @@ -149,8 +148,8 @@ var _ = Describe("Kubernetes API object patching", func() { }) It("should successfully JQPatch an object", func() { - err := ObjectPatcher.ExecuteOperation(object_patch.NewFromOperationSpec(object_patch.OperationSpec{ - Operation: object_patch.JQPatch, + err := ObjectPatcher.ExecuteOperation(objectpatch.NewFromOperationSpec(objectpatch.OperationSpec{ + Operation: objectpatch.JQPatch, ApiVersion: testCM.APIVersion, Kind: testCM.Kind, Namespace: testCM.Namespace, @@ -176,7 +175,7 @@ data: mergePatchJson, err := json.Marshal(mergePatch) Expect(err).To(Succeed()) - err = ObjectPatcher.ExecuteOperation(object_patch.NewMergePatchOperation(mergePatchJson, testCM.APIVersion, testCM.Kind, testCM.Namespace, testCM.Name)) + err = ObjectPatcher.ExecuteOperation(objectpatch.NewMergePatchOperation(mergePatchJson, testCM.APIVersion, testCM.Kind, testCM.Namespace, testCM.Name)) Expect(err).To(Succeed()) existingCM, err := 
KubeClient.CoreV1().ConfigMaps(testCM.Namespace).Get(context.TODO(), testCM.Name, metav1.GetOptions{}) @@ -185,7 +184,7 @@ data: }) It("should successfully JSONPatch an object", func() { - err := ObjectPatcher.ExecuteOperation(object_patch.NewJSONPatchOperation( + err := ObjectPatcher.ExecuteOperation(objectpatch.NewJSONPatchOperation( []byte(`[{ "op": "replace", "path": "/data/firstField", "value": "jsonPatched"}]`), testCM.APIVersion, testCM.Kind, testCM.Namespace, testCM.Name)) Expect(err).To(Succeed()) @@ -205,7 +204,7 @@ func ensureNamespace(name string) error { panic(err) } - return ObjectPatcher.ExecuteOperation(object_patch.NewCreateOperation(unstructuredNS, object_patch.UpdateIfExists())) + return ObjectPatcher.ExecuteOperation(objectpatch.NewCreateOperation(unstructuredNS, objectpatch.UpdateIfExists())) } func ensureTestObject(_ string, obj interface{}) error { @@ -214,11 +213,11 @@ func ensureTestObject(_ string, obj interface{}) error { panic(err) } - return ObjectPatcher.ExecuteOperation(object_patch.NewCreateOperation(unstructuredObj, object_patch.UpdateIfExists())) + return ObjectPatcher.ExecuteOperation(objectpatch.NewCreateOperation(unstructuredObj, objectpatch.UpdateIfExists())) } func removeNamespace(name string) error { - return ObjectPatcher.ExecuteOperation(object_patch.NewDeleteOperation("", "Namespace", "", name)) + return ObjectPatcher.ExecuteOperation(objectpatch.NewDeleteOperation("", "Namespace", "", name)) } func generateUnstructured(obj interface{}) (*unstructured.Unstructured, error) { diff --git a/test/integration/suite/run.go b/test/integration/suite/run.go index f6fb6dc6..9ede565f 100644 --- a/test/integration/suite/run.go +++ b/test/integration/suite/run.go @@ -7,19 +7,18 @@ import ( "os" "testing" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/deckhouse/deckhouse/pkg/log" klient "github.com/flant/kube-client/client" - "github.com/flant/shell-operator/pkg/kube/object_patch" + objectpatch "github.com/flant/shell-operator/pkg/kube/object_patch" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" ) var ( ClusterName string ContextName string KubeClient *klient.Client - ObjectPatcher *object_patch.ObjectPatcher + ObjectPatcher *objectpatch.ObjectPatcher ) func RunIntegrationSuite(t *testing.T, description string, clusterPrefix string) { @@ -37,5 +36,5 @@ var _ = BeforeSuite(func() { err := KubeClient.Init() Expect(err).ShouldNot(HaveOccurred()) - ObjectPatcher = object_patch.NewObjectPatcher(KubeClient, log.NewNop()) + ObjectPatcher = objectpatch.NewObjectPatcher(KubeClient, log.NewNop()) })
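For downstream callers such as the integration tests and addon-operator, pkg/kube/object_patch keeps its path but is now imported under the objectpatch alias. A small illustrative helper assembled only from operations exercised in the tests above (the helper name ensureObject is hypothetical):

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	objectpatch "github.com/flant/shell-operator/pkg/kube/object_patch"
)

// ensureObject creates the object, or updates it if it already exists,
// mirroring ensureTestObject from the integration suite above.
func ensureObject(patcher *objectpatch.ObjectPatcher, obj *unstructured.Unstructured) error {
	return patcher.ExecuteOperation(objectpatch.NewCreateOperation(obj, objectpatch.UpdateIfExists()))
}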