diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index de77519..13a5793 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -53,19 +53,18 @@ jobs: steps: - uses: actions/setup-go@v4 - uses: actions/checkout@v3 - - run: go test ./... -v -failfast + - run: go test ./... -race -failfast go-test-coverage: runs-on: ubuntu-latest steps: - uses: actions/setup-go@v4 - uses: actions/checkout@v3 - - run: go test ./... -race -covermode=atomic -coverprofile=coverage.out - # https://about.codecov.io/blog/getting-started-with-code-coverage-for-golang/ + - run: go test ./... -race -covermode=atomic -coverprofile=coverprofile + - run: cat coverprofile | grep -v yaccpar > coverage.out - uses: codecov/codecov-action@v3 - # https://github.com/mattn/goveralls#github-actions - run: go install github.com/mattn/goveralls@latest - - run: goveralls -service=github -coverprofile=coverage.out + - run: goveralls -coverprofile=coverage.out -service=github env: COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/Dockerfile b/Dockerfile index 74d25f3..ffefe2f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,14 @@ -FROM golang:1.19-alpine AS base +FROM golang:1.20-alpine AS base ARG VERSION WORKDIR /temp/ COPY . 
./ RUN go mod download -x RUN go build -ldflags="-X 'main.Version=$VERSION'" -o /app/bin/lethe RUN cp -a ./etc /app/etc -RUN cd cli && go build -o /app/bin/lethetool -FROM alpine:3.15 +FROM alpine:3.18 COPY --from=base /app /app -RUN set -x \ -&& apk add --no-cache coreutils util-linux +RUN set -x && apk add --no-cache coreutils util-linux curl grep -WORKDIR /app +WORKDIR /app ENTRYPOINT ["/app/bin/lethe"] diff --git a/Makefile b/Makefile index c982502..2c3c386 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -VERSION := v0.2.0-beta.1 +VERSION := v0.2.0 IMAGE := ghcr.io/kuoss/lethe:$(VERSION) install-dev: diff --git a/cli/cmd/list/list.go b/cli/cmd/list/list.go deleted file mode 100644 index bf56a82..0000000 --- a/cli/cmd/list/list.go +++ /dev/null @@ -1,22 +0,0 @@ -package list - -import ( - "github.com/spf13/cobra" -) - -var listCmd = &cobra.Command{ - Use: "list", - Short: "List log files or dirs", -} - -func Init(parentCmd *cobra.Command) { - initListDirs() - initListFiles() - initListTargets() - parentCmd.AddCommand(listCmd) -} - -// for test -func GetListCmd() *cobra.Command { - return listCmd -} diff --git a/cli/cmd/list/list_dirs.go b/cli/cmd/list/list_dirs.go deleted file mode 100644 index db978f4..0000000 --- a/cli/cmd/list/list_dirs.go +++ /dev/null @@ -1,62 +0,0 @@ -package list - -import ( - "bytes" - "fmt" - - "github.com/kuoss/lethe/logs/rotator" - - cliutil "github.com/kuoss/lethe/cli/util" - "github.com/spf13/cobra" -) - -func initListDirs() { - listCmd.AddCommand(&cobra.Command{ - Use: "dirs", - Short: "List log dirs", - Run: func(cmd *cobra.Command, args []string) { - listDirs(cmd) - }, - }) -} - -func listDirs(c *cobra.Command) { - dirs := rotator.NewRotator().ListDirsWithSize() - - var data [][]string - var totalSize int64 - totalFileCount := 0 - for _, dir := range dirs { - totalSize += dir.Size - totalFileCount += dir.FileCount - firstFile := dir.FirstFile - lastFile := dir.LastFile - if firstFile == "" { - firstFile = "-" - } - if 
lastFile == "" { - lastFile = "-" - } - data = append(data, []string{ - dir.FullPath, - fmt.Sprintf("%.1f", float64(dir.Size)/1024/1024), - fmt.Sprintf("%d", dir.FileCount), - firstFile, - lastFile, - }) - } - data = append(data, []string{ - "TOTAL", - fmt.Sprintf("%.1f", float64(totalSize)/1024/1024), - fmt.Sprintf("%d", totalFileCount), - "-", - "-", - }) - - buf := &bytes.Buffer{} - table := cliutil.NewTableWriter(buf) - table.SetHeader([]string{"DIR", "SIZE(Mi)", "FILES", "FIRST FILE", "LAST FILE"}) - table.AppendBulk(data) - table.Render() - c.Print(buf.String()) -} diff --git a/cli/cmd/list/list_files.go b/cli/cmd/list/list_files.go deleted file mode 100644 index 21e3003..0000000 --- a/cli/cmd/list/list_files.go +++ /dev/null @@ -1,47 +0,0 @@ -package list - -import ( - "bytes" - "fmt" - - "github.com/kuoss/lethe/logs/rotator" - - cliutil "github.com/kuoss/lethe/cli/util" - "github.com/spf13/cobra" -) - -func initListFiles() { - listCmd.AddCommand(&cobra.Command{ - Use: "files", - Short: "List log files", - RunE: func(cmd *cobra.Command, args []string) error { - return listFiles(cmd) - }, - }) -} - -func listFiles(c *cobra.Command) error { - files, err := rotator.NewRotator().ListFiles() - if err != nil { - return fmt.Errorf("error on ListFiles: %w", err) - } - - var data [][]string - var totalSize int64 - for _, file := range files { - totalSize += file.Size - data = append(data, []string{ - file.FullPath, - fmt.Sprintf("%.1f", float64(file.Size)/1024/1024), - }) - } - data = append(data, []string{"TOTAL", fmt.Sprintf("%.1f", float64(totalSize)/1024/1024)}) - - buf := &bytes.Buffer{} - table := cliutil.NewTableWriter(buf) - table.SetHeader([]string{"FILEPATH", "SIZE(Mi)"}) - table.AppendBulk(data) - table.Render() - c.Print(buf.String()) - return nil -} diff --git a/cli/cmd/list/list_targets.go b/cli/cmd/list/list_targets.go deleted file mode 100644 index 303a5be..0000000 --- a/cli/cmd/list/list_targets.go +++ /dev/null @@ -1,82 +0,0 @@ -package list - 
-import ( - "bytes" - "fmt" - "regexp" - "time" - - "github.com/kuoss/lethe/logs/rotator" - - cliutil "github.com/kuoss/lethe/cli/util" - "github.com/spf13/cobra" -) - -func initListTargets() { - listCmd.AddCommand(&cobra.Command{ - Use: "targets", - Short: "List targets", - Run: func(cmd *cobra.Command, args []string) { - listTargets(cmd) - }, - }) -} - -func listTargets(c *cobra.Command) { - now := time.Now().UTC() - dirs := rotator.NewRotator().ListTargets() - - var data [][]string - var totalSize int64 - totalFileCount := 0 - for _, dir := range dirs { - totalSize += dir.Size - totalFileCount += dir.FileCount - firstFile := dir.FirstFile - lastFile := dir.LastFile - lastForward := dir.LastForward - if firstFile == "" { - firstFile = "-" - } - if lastFile == "" { - lastFile = "-" - } - if lastForward == "" || len(lastForward) != 20 { - lastForward = "-" - } else { - // fmt.Println(lastForward) - dt, err := time.Parse("2006-01-02T15:04:05Z", lastForward) - if err != nil { - lastForward = "-" - } else { - age := now.Sub(dt).Round(time.Second) - re := regexp.MustCompile(`[0-9]+[a-z]`) - match := re.FindStringSubmatch(age.String()) - lastForward = fmt.Sprintf("%s (%v)", dir.LastForward, match[0]) - } - } - data = append(data, []string{ - dir.FullPath, - fmt.Sprintf("%.1f", float64(dir.Size)/1024/1024), - fmt.Sprintf("%d", dir.FileCount), - firstFile, - lastFile, - lastForward, - }) - } - data = append(data, []string{ - "TOTAL", - fmt.Sprintf("%.1f", float64(totalSize)/1024/1024), - fmt.Sprintf("%d", totalFileCount), - "-", - "-", - "-", - }) - - buf := &bytes.Buffer{} - table := cliutil.NewTableWriter(buf) - table.SetHeader([]string{"DIR", "SIZE(Mi)", "FILES", "FIRST FILE", "LAST FILE", "LAST FORWARD"}) - table.AppendBulk(data) - table.Render() - c.Print(buf.String()) -} diff --git a/cli/cmd/logs.go b/cli/cmd/logs.go deleted file mode 100644 index 4819aba..0000000 --- a/cli/cmd/logs.go +++ /dev/null @@ -1,44 +0,0 @@ -package cmd - -import ( - "fmt" - - 
"github.com/kuoss/lethe/letheql" - "github.com/spf13/cobra" -) - -var logsCmd = &cobra.Command{ - Use: "logs", - Short: "Print LetheQL execution result", - Run: func(cmd *cobra.Command, args []string) { - Query(cmd) - }, -} - -func init() { - var query string - logsCmd.Flags().StringVarP(&query, "query", "q", "", "letheql") - rootCmd.AddCommand(logsCmd) -} - -func Query(cmd *cobra.Command) { - query, err := cmd.Flags().GetString("query") - fmt.Println("=== query=", query) - if err != nil { - cmd.PrintErr(err) - return - } - - if query == "" { - cmd.PrintErr("error: logs command needs an flag: --query\n") - return - } - data, err := letheql.ProcQuery(query, letheql.TimeRange{}) - if err != nil { - cmd.PrintErr(err) - return - } - for _, log := range data.Logs { - cmd.Println(log) - } -} diff --git a/cli/cmd/root.go b/cli/cmd/root.go deleted file mode 100644 index cd7091e..0000000 --- a/cli/cmd/root.go +++ /dev/null @@ -1,28 +0,0 @@ -package cmd - -import ( - "github.com/kuoss/lethe/cli/cmd/list" - "github.com/kuoss/lethe/cli/cmd/task" - "github.com/spf13/cobra" -) - -var ( - rootCmd = &cobra.Command{ - Use: "lethetool", - Short: "Tooling for the Lethe logging system.", - } -) - -func Execute() error { - return rootCmd.Execute() -} - -func init() { - list.Init(rootCmd) - task.Init(rootCmd) -} - -// for test -func GetRootCmd() *cobra.Command { - return rootCmd -} diff --git a/cli/cmd/task/task.go b/cli/cmd/task/task.go deleted file mode 100644 index a61a034..0000000 --- a/cli/cmd/task/task.go +++ /dev/null @@ -1,16 +0,0 @@ -package task - -import ( - "github.com/spf13/cobra" -) - -var taskCmd = &cobra.Command{ - Use: "task", - Short: "Run task", -} - -func Init(parentCmd *cobra.Command) { - initDeleteByAge() - initDeleteBySize() - parentCmd.AddCommand(taskCmd) -} diff --git a/cli/cmd/task/task_deleteByAge.go b/cli/cmd/task/task_deleteByAge.go deleted file mode 100644 index 08e1aed..0000000 --- a/cli/cmd/task/task_deleteByAge.go +++ /dev/null @@ -1,26 +0,0 @@ -package 
task - -import ( - "github.com/kuoss/common/logger" - "github.com/kuoss/lethe/logs/rotator" - "github.com/spf13/cobra" -) - -var deleteByAgeCmd = &cobra.Command{ - Use: "delete-by-age", - Short: "Delete log files by age", - Run: func(cmd *cobra.Command, args []string) { - DeleteByAge(cmd) - }, -} - -func initDeleteByAge() { - taskCmd.AddCommand(deleteByAgeCmd) -} - -func DeleteByAge(cmd *cobra.Command) { - err := rotator.NewRotator().DeleteByAge() - if err != nil { - logger.Errorf("error on DeleteByAge: %s", err) - } -} diff --git a/cli/cmd/task/task_deleteBySize.go b/cli/cmd/task/task_deleteBySize.go deleted file mode 100644 index 3a0a446..0000000 --- a/cli/cmd/task/task_deleteBySize.go +++ /dev/null @@ -1,26 +0,0 @@ -package task - -import ( - "github.com/kuoss/common/logger" - "github.com/kuoss/lethe/logs/rotator" - "github.com/spf13/cobra" -) - -var deleteBySizeCmd = &cobra.Command{ - Use: "delete-by-size", - Short: "Delete log files by size", - Run: func(cmd *cobra.Command, args []string) { - DeleteBySize(cmd) - }, -} - -func initDeleteBySize() { - taskCmd.AddCommand(deleteBySizeCmd) -} - -func DeleteBySize(cmd *cobra.Command) { - err := rotator.NewRotator().DeleteBySize() - if err != nil { - logger.Errorf("error on DeleteByAge: %s", err) - } -} diff --git a/cli/cmd/version.go b/cli/cmd/version.go deleted file mode 100644 index 8db9367..0000000 --- a/cli/cmd/version.go +++ /dev/null @@ -1,17 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" -) - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of lethetool", - Run: func(cmd *cobra.Command, args []string) { - cmd.Println("lethetool v0.0.1") - }, -} - -func init() { - rootCmd.AddCommand(versionCmd) -} diff --git a/cli/init_test.go b/cli/init_test.go deleted file mode 100644 index 4a05fa6..0000000 --- a/cli/init_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "bytes" - "strings" - - "github.com/kuoss/lethe/cli/cmd" - cliutil 
"github.com/kuoss/lethe/cli/util" - "github.com/kuoss/lethe/config" - testutil "github.com/kuoss/lethe/testutil" -) - -//var rotator *rotator2.Rotator - -func init() { - testutil.Init() - testutil.SetTestLogFiles() - - config.SetWriter(cliutil.GetWriter()) - // time.Sleep(500 * time.Millisecond) -} - -func execute(args ...string) string { - buf := new(bytes.Buffer) - cmd := cmd.GetRootCmd() - cmd.SetOut(buf) - cmd.SetErr(buf) - cmd.SetArgs(args) - _ = cmd.Execute() - result := strings.TrimSpace(buf.String() + cliutil.GetString()) - // time.Sleep(500 * time.Millisecond) - return result -} diff --git a/cli/list_test.go b/cli/list_test.go deleted file mode 100644 index f89f2f4..0000000 --- a/cli/list_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "runtime" - "testing" - - "github.com/kuoss/lethe/testutil" - "github.com/stretchr/testify/assert" -) - -func Test_list_dirs(t *testing.T) { - testutil.SetTestLogFiles() - - actual := execute("list", "dirs") - - expected := "DIR SIZE(Mi) FILES FIRST FILE LAST FILE \ntmp/log/node/node01 0.0 2 2009-11-10_21.log 2009-11-10_22.log \ntmp/log/node/node02 0.0 2 2009-11-01_00.log 2009-11-10_21.log \ntmp/log/pod/namespace01 0.0 4 2000-01-01_00.log 2029-11-10_23.log \ntmp/log/pod/namespace02 0.0 2 0000-00-00_00.log 2009-11-10_22.log \nTOTAL 0.0 10 - -" - if runtime.GOOS == "windows" { - expected = "DIR SIZE(Mi) FILES FIRST FILE LAST FILE \ntmp\\log\\node\\node01 0.0 2 2009-11-10_21.log 2009-11-10_22.log \ntmp\\log\\node\\node02 0.0 2 2009-11-01_00.log 2009-11-10_21.log \ntmp\\log\\pod\\namespace01 0.0 4 2000-01-01_00.log 2029-11-10_23.log \ntmp\\log\\pod\\namespace02 0.0 2 0000-00-00_00.log 2009-11-10_22.log \nTOTAL 0.0 10 - -" - } - assert.Equal(t, expected, actual) -} - -func Test_list_files(t *testing.T) { - testutil.SetTestLogFiles() - - actual := execute("list", "files") - expected := "FILEPATH SIZE(Mi) \ntmp/log/node/node01/2009-11-10_21.log 0.0 \ntmp/log/node/node01/2009-11-10_22.log 0.0 
\ntmp/log/node/node02/2009-11-01_00.log 0.0 \ntmp/log/node/node02/2009-11-10_21.log 0.0 \ntmp/log/pod/namespace01/2000-01-01_00.log 0.0 \ntmp/log/pod/namespace01/2009-11-10_21.log 0.0 \ntmp/log/pod/namespace01/2009-11-10_22.log 0.0 \ntmp/log/pod/namespace01/2029-11-10_23.log 0.0 \ntmp/log/pod/namespace02/0000-00-00_00.log 0.0 \ntmp/log/pod/namespace02/2009-11-10_22.log 0.0 \nTOTAL 0.0" - if runtime.GOOS == "windows" { - expected = "FILEPATH SIZE(Mi) \ntmp\\log\\node\\node01\\2009-11-10_21.log 0.0 \ntmp\\log\\node\\node01\\2009-11-10_22.log 0.0 \ntmp\\log\\node\\node02\\2009-11-01_00.log 0.0 \ntmp\\log\\node\\node02\\2009-11-10_21.log 0.0 \ntmp\\log\\pod\\namespace01\\2000-01-01_00.log 0.0 \ntmp\\log\\pod\\namespace01\\2009-11-10_21.log 0.0 \ntmp\\log\\pod\\namespace01\\2009-11-10_22.log 0.0 \ntmp\\log\\pod\\namespace01\\2029-11-10_23.log 0.0 \ntmp\\log\\pod\\namespace02\\0000-00-00_00.log 0.0 \ntmp\\log\\pod\\namespace02\\2009-11-10_22.log 0.0 \nTOTAL 0.0" - } - assert.Equal(t, expected, actual) -} diff --git a/cli/logs_test.go b/cli/logs_test.go deleted file mode 100644 index 709a753..0000000 --- a/cli/logs_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - - // cliutil "github.com/kuoss/lethe/cli/util" - testutil "github.com/kuoss/lethe/testutil" -) - -func init() { - testutil.Init() - // testutil.SetTestLogFiles() - // rotator = logs.NewRotator() - // config.SetWriter(cliutil.GetWriter()) -} - -func Test_logs(t *testing.T) { - - tests := map[string]struct { - args []string - want string - }{ - "logs --query": { - args: []string{"logs", "--query"}, - want: "Error: flag needs an argument: --query\nUsage:\n lethetool logs [flags]\n\nFlags:\n -h, --help help for logs\n -q, --query string letheql", - }, - `logs --query pod{namespace=""}`: { - args: []string{"logs", "--query", `pod{namespace=""}`}, - want: "namespace value cannot be empty", - }, - `logs --query 
pod{namespace="ns-not-exists"}`: { - args: []string{"logs", "--query", `pod{namespace="ns-not-exists"}`}, - want: "", - }, - `logs --query pod{namespace="namespace01"}`: { - args: []string{"logs", "--query", `pod{namespace="namespace01"}`}, - want: "{ 2009-11-10T21:00:00Z namespace01 nginx-deployment-75675f5897-7ci7o nginx hello world}\n{ 2009-11-10T21:01:00Z namespace01 nginx-deployment-75675f5897-7ci7o nginx hello world}\n{ 2009-11-10T21:02:00Z namespace01 nginx-deployment-75675f5897-7ci7o nginx hello world}\n{ 2009-11-10T22:56:00Z namespace01 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:56:00Z namespace01 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:56:00Z namespace01 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:57:00Z namespace01 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:57:00Z namespace01 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:57:00Z namespace01 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:58:00Z namespace01 nginx-deployment-75675f5897-7ci7o sidecar hello from sidecar}\n{ 2009-11-10T22:58:00Z namespace01 nginx-deployment-75675f5897-7ci7o sidecar lerom from sidecar}\n{ 2009-11-10T22:58:00Z namespace01 nginx-deployment-75675f5897-7ci7o sidecar hello from sidecar}\n{ 2009-11-10T22:59:00Z namespace01 nginx-deployment-75675f5897-7ci7o nginx lerom ipsum}\n{ 2009-11-10T22:59:00Z namespace01 nginx-deployment-75675f5897-7ci7o nginx hello world}", - }, - `logs --query pod{namespace="namespace02"}`: { - args: []string{"logs", "--query", `pod{namespace="namespace02"}`}, - want: "{ 2009-11-10T22:58:00Z namespace02 nginx-deployment-75675f5897-7ci7o nginx hello world}\n{ 2009-11-10T22:58:00Z namespace02 nginx-deployment-75675f5897-7ci7o nginx lerom ipsum}\n{ 2009-11-10T22:58:00Z namespace02 nginx-deployment-75675f5897-7ci7o nginx hello world}\n{ 2009-11-10T22:58:00Z namespace02 nginx-deployment-75675f5897-7ci7o sidecar hello from 
sidecar}\n{ 2009-11-10T22:58:00Z namespace02 nginx-deployment-75675f5897-7ci7o sidecar lerom from sidecar}\n{ 2009-11-10T22:58:00Z namespace02 nginx-deployment-75675f5897-7ci7o sidecar hello from sidecar}\n{ 2009-11-10T22:58:00Z namespace02 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:58:00Z namespace02 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:58:00Z namespace02 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:58:00Z namespace02 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:58:00Z namespace02 apache-75675f5897-7ci7o httpd hello from sidecar}\n{ 2009-11-10T22:58:00Z namespace02 apache-75675f5897-7ci7o httpd hello from sidecar}", - }, - } - - for name, test := range tests { - t.Run(name, func(subt *testing.T) { - got := execute(test.args...) - assert.Equal(subt, test.want, got) - }) - } - time.Sleep(3000 * time.Millisecond) -} diff --git a/cli/main.go b/cli/main.go deleted file mode 100644 index bb02ecb..0000000 --- a/cli/main.go +++ /dev/null @@ -1,21 +0,0 @@ -package main - -import ( - "github.com/kuoss/common/logger" - "github.com/kuoss/lethe/cli/cmd" - cliutil "github.com/kuoss/lethe/cli/util" - "github.com/kuoss/lethe/config" -) - -func main() { - var err error - err = config.LoadConfig() - if err != nil { - logger.Fatalf("error on LoadConfig: %s", err) - } - config.SetWriter(cliutil.GetWriter()) - err = cmd.Execute() - if err != nil { - logger.Fatalf("error on Execute: %s", err) - } -} diff --git a/cli/main_test.go b/cli/main_test.go deleted file mode 100644 index 0d538fd..0000000 --- a/cli/main_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_main(t *testing.T) { - main() -} - -func Test_version(t *testing.T) { - actual := execute("version") - expected := "lethetool v0.0.1" - assert.Equal(t, expected, actual) -} diff --git a/cli/task_test.go b/cli/task_test.go deleted file mode 100644 index 
f8ff682..0000000 --- a/cli/task_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package main - -import ( - "testing" - - "github.com/kuoss/lethe/config" - "github.com/kuoss/lethe/testutil" - "github.com/stretchr/testify/assert" -) - -func Test_task_deleteByAge_10d(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", "20d") - execute("task", "delete-by-age") - - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.FileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.FileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - // assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - // assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func Test_task_deleteByAge_1d(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", "2d") - execute("task", "delete-by-age") - - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - // assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.FileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - // assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - // assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func Test_task_deleteByAge_1h(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", 
"1h") - execute("task", "delete-by-age") - - // assert.NoFileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - // assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - // assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - // assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - // assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - // assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func Test_task_deleteBySize_1m(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.size", "1m") - execute("task", "delete-by-size") - - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.FileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.FileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.FileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.FileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func Test_task_deleteBySize_1k(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.size", "1k") - execute("task", "delete-by-size") - - // assert.NoFileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - // assert.NoFileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - // assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - // assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - // 
assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - // assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - // assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - // assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} diff --git a/cli/util/table.go b/cli/util/table.go deleted file mode 100644 index bc219ce..0000000 --- a/cli/util/table.go +++ /dev/null @@ -1,20 +0,0 @@ -package util - -import ( - "bytes" - - "github.com/olekukonko/tablewriter" -) - -func NewTableWriter(buf *bytes.Buffer) *tablewriter.Table { - table := tablewriter.NewWriter(buf) - table.SetHeaderLine(false) - table.SetBorder(false) - table.SetNoWhiteSpace(true) - table.SetTablePadding("") - table.SetColumnSeparator("") - table.SetTablePadding(" ") - table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) - table.SetAutoFormatHeaders(false) - return table -} diff --git a/cli/util/writer.go b/cli/util/writer.go deleted file mode 100644 index a220257..0000000 --- a/cli/util/writer.go +++ /dev/null @@ -1,18 +0,0 @@ -package util - -import "bytes" - -var writer = new(bytes.Buffer) - -func GetWriter() *bytes.Buffer { - return writer -} - -func Clean() { - writer.Truncate(0) -} - -func GetString() string { - defer Clean() - return writer.String() -} diff --git a/clock/clock.go b/clock/clock.go index 9d7cdaf..1bf0124 100644 --- a/clock/clock.go +++ b/clock/clock.go @@ -1,13 +1,21 @@ package clock import ( - "os" "time" ) -func GetNow() time.Time { - if os.Getenv("TEST_MODE") == "1" { - return time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC) +var ( + playgroundMode = false + playgroundTime = time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC) +) + +func Now() time.Time { + if playgroundMode { + return playgroundTime } return time.Now() } + +func SetPlaygroundMode(newPlaygroundMode bool) { + playgroundMode = newPlaygroundMode +} diff --git a/clock/clock_test.go 
b/clock/clock_test.go index dda5be0..bbb9dd4 100644 --- a/clock/clock_test.go +++ b/clock/clock_test.go @@ -1,29 +1,30 @@ package clock import ( - "os" "testing" "time" - "github.com/kuoss/lethe/testutil" + "github.com/stretchr/testify/assert" ) -var testTime time.Time = time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC) +var ( + playgroundTime_test = time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC) +) -func init() { - testutil.Init() -} +func TestNow(t *testing.T) { -func Test_GetNow_TestMode(t *testing.T) { - if GetNow() != testTime { - t.Fatalf("Now != testTime in test mode.") - } -} + justBefore := time.Now() + + // normal mode + assert.NotEqual(t, playgroundTime_test, Now()) + assert.Greater(t, Now(), justBefore) + + // playground mode + SetPlaygroundMode(true) + assert.Equal(t, playgroundTime_test, Now()) -func Test_GetNow_ProdMode(t *testing.T) { - os.Setenv("TEST_MODE", "0") - if GetNow() == testTime { - t.Fatalf("Now == testTime in prod mode.") - } - os.Setenv("TEST_MODE", "1") + // normal mode + SetPlaygroundMode(false) + assert.NotEqual(t, playgroundTime_test, Now()) + assert.Greater(t, Now(), justBefore) } diff --git a/codecov.yml b/codecov.yml index 959972a..8d5f303 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1 +1 @@ -comment: false \ No newline at end of file +#comment: false \ No newline at end of file diff --git a/config/config.go b/config/config.go index ef9ea86..ddedb54 100644 --- a/config/config.go +++ b/config/config.go @@ -2,89 +2,126 @@ package config import ( "fmt" - "io" - "os" "path/filepath" + "time" "github.com/kuoss/common/logger" _ "github.com/kuoss/lethe/storage/driver/filesystem" + "github.com/kuoss/lethe/util" "github.com/spf13/viper" - "gopkg.in/yaml.v2" ) -var ( - vip *viper.Viper - logDataPath string - webListenAddress string - writer io.Writer = os.Stdout - limit int = 1000 -) - -func LoadConfig() error { - vip = viper.New() +type Config struct { + limit int + logDataPath string + retentionSize int + retentionTime time.Duration + 
retentionSizingStrategy string + timeout time.Duration + version string + webListenAddress string +} - vip.SetDefault("storage.log_data_path", "./tmp/log") - vip.SetDefault("web.listen_address", ":6060") +func New(version string) (*Config, error) { + v := viper.New() + v.SetConfigName("lethe") + v.SetConfigType("yaml") + v.AddConfigPath(filepath.Join(".", "etc")) + v.AddConfigPath(filepath.Join("..", "etc")) - vip.SetConfigName("lethe") - vip.SetConfigType("yaml") - vip.AddConfigPath(filepath.Join(".", "etc")) - vip.AddConfigPath(filepath.Join("..", "etc")) - err := vip.ReadInConfig() + err := v.ReadInConfig() if err != nil { - return fmt.Errorf("error on ReadInConfig: %w", err) + return &Config{}, fmt.Errorf("readInConfig err: %w", err) } - err = viper.Unmarshal(&vip) + err = viper.Unmarshal(&v) if err != nil { - return fmt.Errorf("error on Unmarshal: %w", err) + return &Config{}, fmt.Errorf("unmarshal err: %w", err) + } + + logDataPath := v.GetString("storage.log_data_path") + if logDataPath == "" { + logDataPath = "./tmp/log" } - SetLogDataPath(vip.GetString("storage.log_data_path")) - SetWebListenAddress(vip.GetString("web.listen_address")) - // show all settings in yaml format - yamlBytes, err := yaml.Marshal(vip.AllSettings()) + retentionSize, err := util.StringToBytes(v.GetString("retention.size")) if err != nil { - return fmt.Errorf("error on Marshal: %w", err) + return &Config{}, fmt.Errorf("stringToBytes err: %w", err) + } + retentionTimeString := v.GetString("retention.time") + if retentionTimeString == "" { + retentionTimeString = "15d" + } + retentionTime, err := util.GetDurationFromAge(retentionTimeString) + if err != nil { + return &Config{}, fmt.Errorf("getDurationFromAge err: %w", err) + } + + retentionSizingStrategy := v.GetString("retention.sizingStrategy") + if retentionSizingStrategy == "" { + retentionSizingStrategy = "file" + } + + timeout := 20 * time.Second + + webListenAddress := v.GetString("web.listen_address") + if webListenAddress == 
"" { + webListenAddress = ":6060" + } + + cfg := Config{ + limit: 1000, + logDataPath: logDataPath, + retentionSize: retentionSize, + retentionTime: retentionTime, + retentionSizingStrategy: retentionSizingStrategy, + timeout: timeout, + version: version, + webListenAddress: webListenAddress, } - logger.Infof("settings:\n====================================\n" + string(yamlBytes) + "====================================") - return nil -} -func Viper() *viper.Viper { - return vip + logger.Infof("====================================") + logger.Infof("%+v", cfg) + logger.Infof("====================================") + return &cfg, nil } -func SetWriter(w io.Writer) { - writer = w +func (c *Config) Limit() int { + return c.limit } -func GetWriter() io.Writer { - if writer == nil { - return os.Stdout - } - return writer +func (c *Config) LogDataPath() string { + return c.logDataPath +} +func (c *Config) SetLogDataPath(logDataPath string) { + c.logDataPath = logDataPath } -func GetLimit() int { - return limit +func (c *Config) RetentionSize() int { + return c.retentionSize +} +func (c *Config) SetRetentionSize(retentionSize int) { + c.retentionSize = retentionSize } -func SetLimit(newLimit int) { - limit = newLimit +func (c *Config) RetentionTime() time.Duration { + return c.retentionTime +} +func (c *Config) SetRetentionTime(retentionTime time.Duration) { + c.retentionTime = retentionTime } -func GetLogDataPath() string { - return logDataPath +func (c *Config) RetentionSizingStrategy() string { + return c.retentionSizingStrategy } -func SetLogDataPath(newLogDataPath string) { - logDataPath = newLogDataPath +func (c *Config) Timeout() time.Duration { + return c.timeout } -func GetWebListenAddress() string { - return webListenAddress +func (c *Config) Version() string { + return c.version } -func SetWebListenAddress(newWebListenAddress string) { - webListenAddress = newWebListenAddress +func (c *Config) WebListenAddress() string { + return c.webListenAddress } diff --git 
a/config/config_inner_test.go b/config/config_inner_test.go deleted file mode 100644 index 2fbfcb1..0000000 --- a/config/config_inner_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package config - -import ( - "bytes" - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -func init() { - err := LoadConfig() - if err != nil { - panic(err) - } -} - -func TestLoadConfig(t *testing.T) { - assert.NotNil(t, vip) - assert.NotZero(t, vip) -} - -func TestViper(t *testing.T) { - vip := Viper() - assert.NotNil(t, vip) - assert.NotZero(t, vip) -} - -func TestGetWriter(t *testing.T) { - assert.Equal(t, os.Stdout, GetWriter()) -} - -func TestSetWriter(t *testing.T) { - tempWriter := new(bytes.Buffer) - SetWriter(tempWriter) - assert.Equal(t, tempWriter, GetWriter()) - SetWriter(os.Stdout) -} - -func TestGetLimit(t *testing.T) { - assert.Equal(t, 1000, GetLimit()) -} - -func TestSetLimit(t *testing.T) { - SetLimit(2000) - assert.Equal(t, 2000, GetLimit()) - SetLimit(1000) -} - -func TestGetLogDataPath(t *testing.T) { - assert.Equal(t, "/data/log", GetLogDataPath()) -} - -func TestSetLogDataPath(t *testing.T) { - SetLogDataPath("hello") - assert.Equal(t, "hello", GetLogDataPath()) - SetLogDataPath("/data/log") -} diff --git a/config/config_outer_test.go b/config/config_outer_test.go deleted file mode 100644 index 6b3971f..0000000 --- a/config/config_outer_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package config_test - -import ( - "bytes" - "os" - "testing" - - "github.com/kuoss/lethe/config" - "github.com/stretchr/testify/assert" -) - -func init() { - err := config.LoadConfig() - if err != nil { - panic(err) - } -} - -func TestViper(t *testing.T) { - vip := config.Viper() - assert.NotNil(t, vip) - assert.NotZero(t, vip) -} - -func TestGetWriter(t *testing.T) { - assert.Equal(t, os.Stdout, config.GetWriter()) -} - -func TestSetWriter(t *testing.T) { - tempWriter := new(bytes.Buffer) - config.SetWriter(tempWriter) - assert.Equal(t, tempWriter, config.GetWriter()) - 
config.SetWriter(os.Stdout) -} - -func TestGetLimit(t *testing.T) { - assert.Equal(t, 1000, config.GetLimit()) -} - -func TestSetLimit(t *testing.T) { - config.SetLimit(2000) - assert.Equal(t, 2000, config.GetLimit()) - config.SetLimit(1000) -} - -func TestGetLogDataPath(t *testing.T) { - assert.Equal(t, "/data/log", config.GetLogDataPath()) -} - -func TestSetLogDataPath(t *testing.T) { - config.SetLogDataPath("hello") - assert.Equal(t, "hello", config.GetLogDataPath()) - config.SetLogDataPath("/data/log") -} diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 0000000..2bdc330 --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,60 @@ +package config + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var ( + config1 *Config +) + +func init() { + var err error + config1, err = New("test") + if err != nil { + panic(err) + } +} + +func TestNew(t *testing.T) { + assert.NotEmpty(t, config1) +} + +func TestLimit(t *testing.T) { + assert.Equal(t, 1000, config1.Limit()) +} + +func TestLogDataPath(t *testing.T) { + assert.Equal(t, "/data/log", config1.LogDataPath()) + config1.SetLogDataPath("tmp/hello") + assert.Equal(t, "tmp/hello", config1.LogDataPath()) + config1.SetLogDataPath("/data/log") +} + +func TestRetentionSize(t *testing.T) { + assert.Equal(t, 100*1024*1024, config1.RetentionSize()) // 100 MiB + config1.SetRetentionSize(1 * 1024 * 1024) // 1 MiB + assert.Equal(t, 1*1024*1024, config1.RetentionSize()) // 1 MiB + config1.SetRetentionSize(100 * 1024 * 1024) // 100 MiB +} +func TestRetentionTime(t *testing.T) { + assert.Equal(t, 24*time.Hour, config1.RetentionTime()) // 1 day + config1.SetRetentionTime(15 * 24 * time.Hour) // 15 days + assert.Equal(t, 15*24*time.Hour, config1.RetentionTime()) // 15 days + config1.SetRetentionTime(24 * time.Hour) // 1 day +} + +func TestRetentionSizingStrategy(t *testing.T) { + assert.Equal(t, "file", config1.RetentionSizingStrategy()) +} + +func TestVersion(t *testing.T) 
{ + assert.Equal(t, "test", config1.Version()) +} + +func TestWebListenAddress(t *testing.T) { + assert.Equal(t, ":6060", config1.WebListenAddress()) +} diff --git a/docs/go-licenses.csv b/docs/go-licenses.csv index d25730d..ea67e01 100644 --- a/docs/go-licenses.csv +++ b/docs/go-licenses.csv @@ -1,5 +1,3 @@ -github.com/VictoriaMetrics/metrics,https://github.com/VictoriaMetrics/metrics/blob/v1.18.1/LICENSE,MIT -github.com/VictoriaMetrics/metricsql,https://github.com/VictoriaMetrics/metricsql/blob/v0.43.0/LICENSE,Apache-2.0 github.com/beorn7/perks/quantile,https://github.com/beorn7/perks/blob/v1.0.1/LICENSE,MIT github.com/cespare/xxhash/v2,https://github.com/cespare/xxhash/blob/v2.2.0/LICENSE.txt,MIT github.com/davecgh/go-spew/spew,https://github.com/davecgh/go-spew/blob/v1.1.1/LICENSE,ISC @@ -13,7 +11,7 @@ github.com/go-playground/locales,https://github.com/go-playground/locales/blob/v github.com/go-playground/universal-translator,https://github.com/go-playground/universal-translator/blob/v0.18.0/LICENSE,MIT github.com/go-playground/validator/v10,https://github.com/go-playground/validator/blob/v10.11.0/LICENSE,MIT github.com/gogo/protobuf,https://github.com/gogo/protobuf/blob/v1.3.2/LICENSE,BSD-3-Clause -github.com/golang/protobuf,https://github.com/golang/protobuf/blob/v1.5.3/LICENSE,BSD-3-Clause +github.com/golang/protobuf/proto,https://github.com/golang/protobuf/blob/v1.5.3/LICENSE,BSD-3-Clause github.com/grafana/regexp,https://github.com/grafana/regexp/blob/6b5c0a4cb7fd/LICENSE,BSD-3-Clause github.com/hashicorp/hcl,https://github.com/hashicorp/hcl/blob/v1.0.0/LICENSE,MPL-2.0 github.com/kuoss/common/logger,https://github.com/kuoss/common/blob/v0.1.3/LICENSE,Apache-2.0 @@ -26,10 +24,10 @@ github.com/mitchellh/mapstructure,https://github.com/mitchellh/mapstructure/blob github.com/pelletier/go-toml,https://github.com/pelletier/go-toml/blob/v1.9.4/LICENSE,Apache-2.0 github.com/pkg/errors,https://github.com/pkg/errors/blob/v0.9.1/LICENSE,BSD-2-Clause 
github.com/pmezard/go-difflib/difflib,https://github.com/pmezard/go-difflib/blob/v1.0.0/LICENSE,BSD-3-Clause -github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.14.0/LICENSE,Apache-2.0 -github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.3.0/LICENSE,Apache-2.0 -github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.42.0/LICENSE,Apache-2.0 -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg,https://github.com/prometheus/common/blob/v0.42.0/internal/bitbucket.org/ww/goautoneg/README.txt,BSD-3-Clause +github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.15.1/LICENSE,Apache-2.0 +github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.4.0/LICENSE,Apache-2.0 +github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.43.0/LICENSE,Apache-2.0 +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg,https://github.com/prometheus/common/blob/v0.43.0/internal/bitbucket.org/ww/goautoneg/README.txt,BSD-3-Clause github.com/prometheus/procfs,https://github.com/prometheus/procfs/blob/v0.9.0/LICENSE,Apache-2.0 github.com/prometheus/prometheus,https://github.com/prometheus/prometheus/blob/v0.43.0/LICENSE,Apache-2.0 github.com/sirupsen/logrus,https://github.com/sirupsen/logrus/blob/v1.9.0/LICENSE,MIT @@ -40,17 +38,14 @@ github.com/spf13/pflag,https://github.com/spf13/pflag/blob/v1.0.5/LICENSE,BSD-3- github.com/spf13/viper,https://github.com/spf13/viper/blob/v1.11.0/LICENSE,MIT github.com/stretchr/testify,https://github.com/stretchr/testify/blob/v1.8.2/LICENSE,MIT github.com/subosito/gotenv,https://github.com/subosito/gotenv/blob/v1.2.0/LICENSE,MIT -github.com/thoas/go-funk,https://github.com/thoas/go-funk/blob/v0.9.2/LICENSE,MIT github.com/ugorji/go/codec,https://github.com/ugorji/go/blob/codec/v1.2.7/codec/LICENSE,MIT 
-github.com/valyala/fastrand,https://github.com/valyala/fastrand/blob/v1.1.0/LICENSE,MIT -github.com/valyala/histogram,https://github.com/valyala/histogram/blob/v1.2.0/LICENSE,MIT go.uber.org/atomic,https://github.com/uber-go/atomic/blob/v1.10.0/LICENSE.txt,MIT go.uber.org/goleak,https://github.com/uber-go/goleak/blob/v1.2.1/LICENSE,MIT golang.org/x/crypto/sha3,https://cs.opensource.google/go/x/crypto/+/v0.7.0:LICENSE,BSD-3-Clause -golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/24139beb:LICENSE,BSD-3-Clause -golang.org/x/sys/unix,https://cs.opensource.google/go/x/sys/+/v0.6.0:LICENSE,BSD-3-Clause -golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.8.0:LICENSE,BSD-3-Clause -google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.29.0/LICENSE,BSD-3-Clause +golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/10a50721:LICENSE,BSD-3-Clause +golang.org/x/sys/unix,https://cs.opensource.google/go/x/sys/+/v0.7.0:LICENSE,BSD-3-Clause +golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.9.0:LICENSE,BSD-3-Clause +google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.30.0/LICENSE,BSD-3-Clause gopkg.in/ini.v1,https://github.com/go-ini/ini/blob/v1.67.0/LICENSE,Apache-2.0 gopkg.in/yaml.v2,https://github.com/go-yaml/yaml/blob/v2.4.0/LICENSE,Apache-2.0 gopkg.in/yaml.v3,https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE,MIT diff --git a/go.mod b/go.mod index 42ebd2f..021e417 100644 --- a/go.mod +++ b/go.mod @@ -1,27 +1,21 @@ module github.com/kuoss/lethe -go 1.18 +go 1.20 require ( - github.com/VictoriaMetrics/metricsql v0.43.0 github.com/gin-gonic/gin v1.7.7 github.com/google/fscrypt v0.3.4 github.com/kuoss/common v0.1.3 - github.com/olekukonko/tablewriter v0.0.5 - github.com/pkg/errors v0.9.1 - github.com/prometheus/common v0.42.0 + github.com/prometheus/common v0.43.0 github.com/prometheus/prometheus v0.43.0 github.com/spf13/cast v1.4.1 - github.com/spf13/cobra v1.4.0 github.com/spf13/viper 
v1.11.0 github.com/stretchr/testify v1.8.2 - github.com/thoas/go-funk v0.9.2 - golang.org/x/sys v0.6.0 - gopkg.in/yaml.v2 v2.4.0 + golang.org/x/sys v0.7.0 ) require ( - github.com/VictoriaMetrics/metrics v1.18.1 // indirect + github.com/aws/aws-sdk-go v1.44.245 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -37,21 +31,20 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/afero v1.8.2 // indirect @@ -59,14 +52,13 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.2.0 // indirect github.com/ugorji/go/codec v1.2.7 // indirect - github.com/valyala/fastrand v1.1.0 // indirect - github.com/valyala/histogram v1.2.0 // indirect go.uber.org/atomic 
v1.10.0 // indirect go.uber.org/goleak v1.2.1 // indirect golang.org/x/crypto v0.7.0 // indirect - golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect - golang.org/x/text v0.8.0 // indirect - google.golang.org/protobuf v1.29.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 15f47b3..6e0b271 100644 --- a/go.sum +++ b/go.sum @@ -39,12 +39,9 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0= -github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA= -github.com/VictoriaMetrics/metricsql v0.43.0 h1:pFkzfExn9GJ1w3tE1pFTkjlyPd4kr/onh5CBAJAZf+s= -github.com/VictoriaMetrics/metricsql v0.43.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0= +github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= +github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -58,7 +55,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -168,9 +164,9 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -182,8 +178,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -198,8 +194,6 @@ github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPK github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -212,8 +206,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f 
h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0= @@ -224,13 +216,13 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod 
h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.43.0 h1:iq+BVjvYLei5f27wiuNiB1DN6DYQkp1c8Bx0Vykh5us= +github.com/prometheus/common v0.43.0/go.mod h1:NCvr5cQIh3Y/gy73/RdVtC9r8xxrxwJnB+2lB3BxrFc= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= @@ -239,9 +231,8 @@ github.com/prometheus/prometheus v0.43.0/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2G github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -249,8 +240,6 @@ github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= -github.com/spf13/cobra v1.4.0/go.mod 
h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -272,24 +261,19 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/thoas/go-funk v0.9.2 h1:oKlNYv0AY5nyf9g+/GhMgS/UO2ces0QRdPKwkhY3VCk= -github.com/thoas/go-funk v0.9.2/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= -github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= -github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= -github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -322,8 +306,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= -golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -350,6 +334,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -383,7 +368,9 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -393,7 +380,7 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -405,6 +392,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -445,13 +433,17 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -460,8 +452,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -515,6 +508,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -611,8 +605,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0= -google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/hack/test-cover.sh b/hack/test-cover.sh index 8e4be03..0a76493 100755 --- a/hack/test-cover.sh +++ b/hack/test-cover.sh @@ -5,11 +5,12 @@ cd $(dirname $0)/.. export PS4='[$(basename $0):$LINENO] ' set -x -go test ./... -v -failfast -race -covermode=atomic -coverprofile /tmp/cover.out +go test ./... -v -failfast -race -covermode=atomic -coverprofile /tmp/coverprofile if [[ $? != 0 ]]; then echo "❌ FAIL - test failed" exit 1 fi +cat /tmp/coverprofile | grep -v yaccpar > /tmp/cover.out COVER=$(go tool cover -func /tmp/cover.out | tail -1 | grep -oP [0-9.]+) rm -f /tmp/cover.out diff --git a/handler/handler.go b/handler/handler.go new file mode 100644 index 0000000..4bba12a --- /dev/null +++ b/handler/handler.go @@ -0,0 +1,44 @@ +package handler + +import ( + "github.com/gin-gonic/gin" + "github.com/kuoss/lethe/config" + "github.com/kuoss/lethe/storage/fileservice" + "github.com/kuoss/lethe/storage/queryservice" +) + +type Handler struct { + config *config.Config + fileService *fileservice.FileService + queryService *queryservice.QueryService + router *gin.Engine +} + +func New(cfg *config.Config, fileService *fileservice.FileService, queryService *queryservice.QueryService) *Handler { + handler := &Handler{ + config: cfg, + fileService: fileService, + queryService: queryService, + } + handler.setupRouter() + return handler +} + +func (h *Handler) setupRouter() { + r := gin.Default() + + r.GET("/ping", func(c *gin.Context) { c.JSON(200, gin.H{"message": "pong"}) }) + + r.GET("/-/healthy", h.Healthy) + r.GET("/-/ready", h.Ready) + + r.GET("/api/v1/metadata", h.Metadata) + r.GET("/api/v1/query", 
h.Query) + r.GET("/api/v1/query_range", h.QueryRange) + r.GET("/api/v1/targets", h.Target) + h.router = r +} + +func (h *Handler) Run() error { + return h.router.Run(h.config.WebListenAddress()) +} diff --git a/handler/handler_test.go b/handler/handler_test.go new file mode 100644 index 0000000..e8ca7a1 --- /dev/null +++ b/handler/handler_test.go @@ -0,0 +1,17 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + assert.NotEmpty(t, handler1) +} + +func TestPingRoute(t *testing.T) { + code, body := testGET("/ping") + assert.Equal(t, 200, code) + assert.Equal(t, `{"message":"pong"}`, body) +} diff --git a/handler/init_test.go b/handler/init_test.go new file mode 100644 index 0000000..9cc8fb7 --- /dev/null +++ b/handler/init_test.go @@ -0,0 +1,46 @@ +package handler + +import ( + "net/http" + "net/http/httptest" + + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/config" + "github.com/kuoss/lethe/storage/fileservice" + "github.com/kuoss/lethe/storage/logservice" + "github.com/kuoss/lethe/storage/queryservice" + "github.com/kuoss/lethe/util/testutil" +) + +var ( + handler1 *Handler +) + +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + clock.SetPlaygroundMode(true) + + cfg, err := config.New("test") + if err != nil { + panic(err) + } + cfg.SetLogDataPath("tmp/init") + fileService, err := fileservice.New(cfg) + if err != nil { + panic(err) + } + logService := logservice.New(fileService) + queryService := queryservice.New(logService) + handler1 = New(cfg, fileService, queryService) +} + +func testGET(url string) (code int, body string) { + w := httptest.NewRecorder() + req, err := http.NewRequest("GET", url, nil) + if err != nil { + panic(err) + } + handler1.router.ServeHTTP(w, req) + return w.Code, w.Body.String() +} diff --git a/handler/metadata.go b/handler/metadata.go new file mode 100644 index 0000000..11bd43d --- /dev/null +++ b/handler/metadata.go @@ -0,0 +1,32 @@ +package 
handler + +import ( + "fmt" + "net/http" + "path/filepath" + + "github.com/gin-gonic/gin" +) + +func (h *Handler) Metadata(c *gin.Context) { + targets := []string{} + dirs := h.fileService.ListTargets() + + for _, d := range dirs { + var key string + switch d.LogType { + case "pod": + key = "namespace" + case "node": + key = "node" + } + value := filepath.Base(d.Subpath) + targets = append(targets, fmt.Sprintf(`%s{%s="%s"}`, d.LogType, key, value)) + } + c.JSON(http.StatusOK, gin.H{ + "status": "success", + "data": gin.H{ + "targets": targets, + }, + }) +} diff --git a/handler/metadata_test.go b/handler/metadata_test.go new file mode 100644 index 0000000..e395195 --- /dev/null +++ b/handler/metadata_test.go @@ -0,0 +1,20 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMetadata(t *testing.T) { + code, body := testGET("/api/v1/metadata") + assert.Equal(t, 200, code) + assert.JSONEq(t, `{ + "data": { + "targets": [ + "node{node=\"node01\"}", + "node{node=\"node02\"}", + "pod{namespace=\"namespace01\"}", + "pod{namespace=\"namespace02\"}"]}, + "status": "success"}`, body) +} diff --git a/handler/probe.go b/handler/probe.go new file mode 100644 index 0000000..a3019c0 --- /dev/null +++ b/handler/probe.go @@ -0,0 +1,15 @@ +package handler + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +func (h *Handler) Healthy(c *gin.Context) { + c.String(http.StatusOK, "Venti is Healthy.\n") +} + +func (h *Handler) Ready(c *gin.Context) { + c.String(http.StatusOK, "Venti is Ready.\n") +} diff --git a/handler/probe_test.go b/handler/probe_test.go new file mode 100644 index 0000000..4116bdf --- /dev/null +++ b/handler/probe_test.go @@ -0,0 +1,19 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHealthy(t *testing.T) { + code, body := testGET("/-/healthy") + assert.Equal(t, 200, code) + assert.Equal(t, "Venti is Healthy.\n", body) +} + +func TestReady(t *testing.T) { + code, 
body := testGET("/-/ready") + assert.Equal(t, 200, code) + assert.Equal(t, "Venti is Ready.\n", body) +} diff --git a/handler/query.go b/handler/query.go new file mode 100644 index 0000000..c1fce8c --- /dev/null +++ b/handler/query.go @@ -0,0 +1,71 @@ +package handler + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/util" +) + +func (h *Handler) Query(c *gin.Context) { + qs := c.Query("query") + h.query(c, qs, model.TimeRange{}) +} + +func (h *Handler) QueryRange(c *gin.Context) { + qs := c.Query("query") + start := c.Query("start") + end := c.Query("end") + h.query(c, qs, model.TimeRange{Start: util.FloatStringToTime(start), End: util.FloatStringToTime(end)}) +} + +func (h *Handler) query(c *gin.Context, qs string, tr model.TimeRange) { + result := h.queryService.Query(c.Request.Context(), qs, tr) + + // error + if result.Err != nil { + obj := gin.H{ + "status": "error", + "errorType": "queryError", + "error": result.Err.Error(), + } + if len(result.Warnings) > 0 { + obj["warnings"] = result.Warnings + } + c.JSON(http.StatusInternalServerError, obj) + return + } + + // log + if result.Value.Type() == model.ValueTypeLog { + log, ok := result.Value.(model.Log) + if ok { + obj := gin.H{ + "status": "success", + "data": gin.H{ + "resultType": "logs", + "result": log.Lines, + }, + } + if len(result.Warnings) > 0 { + obj["warnings"] = result.Warnings + } + c.JSON(http.StatusOK, obj) + return + } + } + + // any + obj := gin.H{ + "status": "success", + "data": gin.H{ + "resultType": result.Value.Type(), + "result": result.Value, + }, + } + if len(result.Warnings) > 0 { + obj["warnings"] = result.Warnings + } + c.JSON(http.StatusOK, obj) +} diff --git a/handler/query_test.go b/handler/query_test.go new file mode 100644 index 0000000..a0753ca --- /dev/null +++ b/handler/query_test.go @@ -0,0 +1,105 @@ +package handler + +import ( + "fmt" + "net/url" + "testing" + "time" + + 
"github.com/kuoss/lethe/clock" + "github.com/stretchr/testify/assert" +) + +func TestQuery(t *testing.T) { + testCases := []struct { + qs string + wantCode int + wantBody string + }{ + { + `hello`, + 500, `{"error":"unknown logType: hello","errorType":"queryError","status":"error"}`, + }, + { + `pod`, + 500, `{"error":"getTargets err: target matcher err: not found label 'namespace' for logType 'pod'","errorType":"queryError","status":"error"}`, + }, + { + `pod{}`, + 500, `{"error":"getTargets err: target matcher err: not found label 'namespace' for logType 'pod'","errorType":"queryError","status":"error"}`, + }, + { + `pod{namespace="namespace01"}`, + 200, `{"data":{"result":[{"time":"2009-11-10T22:59:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"lerom ipsum"},{"time":"2009-11-10T22:59:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"hello world"}],"resultType":"logs"},"status":"success"}`, + }, + { + `pod{namespace="namespace01"} |= "ipsum"`, + 200, `{"data":{"result":[{"time":"2009-11-10T22:59:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"lerom ipsum"}],"resultType":"logs"},"status":"success"}`, + }, + { + `node{node="node01",process!="kubelet"} |= "hello" != "sidecar"`, + 200, `{"data":{"result":[{"time":"2009-11-10T23:00:00.000000Z","node":"node01","process":"containerd","log":"hello world"}],"resultType":"logs"},"status":"success"}`, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + v := url.Values{} + v.Add("query", tc.qs) + code, body := testGET("/api/v1/query?" 
+ v.Encode()) + assert.Equal(t, tc.wantCode, code) + assert.Equal(t, tc.wantBody, body) + }) + } +} + +func TestQueryRange(t *testing.T) { + now := clock.Now() + ago10d := now.Add(-240 * time.Hour) + testCases := []struct { + qs string + start time.Time + end time.Time + wantCode int + wantBody string + }{ + { + `hello`, ago10d, now, + 500, `{"error":"unknown logType: hello","errorType":"queryError","status":"error"}`, + }, + { + `pod`, ago10d, now, + 500, `{"error":"getTargets err: target matcher err: not found label 'namespace' for logType 'pod'","errorType":"queryError","status":"error"}`, + }, + { + `pod{}`, ago10d, now, + 500, `{"error":"getTargets err: target matcher err: not found label 'namespace' for logType 'pod'","errorType":"queryError","status":"error"}`, + }, + { + `pod{namespace="namespace01"}`, ago10d, now, + 200, `{"data":{"result":[{"time":"2009-11-10T21:00:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"hello world"},{"time":"2009-11-10T21:01:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"hello world"},{"time":"2009-11-10T21:02:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"hello world"},{"time":"2009-11-10T22:56:00.000000Z","namespace":"namespace01","pod":"apache-75675f5897-7ci7o","container":"httpd","log":"hello from sidecar"},{"time":"2009-11-10T22:56:00.000000Z","namespace":"namespace01","pod":"apache-75675f5897-7ci7o","container":"httpd","log":"hello from sidecar"},{"time":"2009-11-10T22:56:00.000000Z","namespace":"namespace01","pod":"apache-75675f5897-7ci7o","container":"httpd","log":"hello from sidecar"},{"time":"2009-11-10T22:59:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"lerom 
ipsum"},{"time":"2009-11-10T22:57:00.000000Z","namespace":"namespace01","pod":"apache-75675f5897-7ci7o","container":"httpd","log":"hello from sidecar"},{"time":"2009-11-10T22:57:00.000000Z","namespace":"namespace01","pod":"apache-75675f5897-7ci7o","container":"httpd","log":"hello from sidecar"},{"time":"2009-11-10T22:57:00.000000Z","namespace":"namespace01","pod":"apache-75675f5897-7ci7o","container":"httpd","log":"hello from sidecar"},{"time":"2009-11-10T22:58:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"sidecar","log":"hello from sidecar"},{"time":"2009-11-10T22:58:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"sidecar","log":"lerom from sidecar"},{"time":"2009-11-10T22:58:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"sidecar","log":"hello from sidecar"},{"time":"2009-11-10T22:59:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"hello world"}],"resultType":"logs"},"status":"success"}`, + }, + { + `pod{namespace="namespace01"} |= "ipsum"`, ago10d, now, + 200, `{"data":{"result":[{"time":"2009-11-10T22:59:00.000000Z","namespace":"namespace01","pod":"nginx-deployment-75675f5897-7ci7o","container":"nginx","log":"lerom ipsum"}],"resultType":"logs"},"status":"success"}`, + }, + { + `node{node="node01",process!="kubelet"} |= "hello" != "sidecar"`, ago10d, now, + 200, `{"data":{"result":[{"time":"2009-11-10T22:59:00.000000Z","node":"node01","process":"containerd","log":"hello world"},{"time":"2009-11-10T23:00:00.000000Z","node":"node01","process":"containerd","log":"hello world"}],"resultType":"logs"},"status":"success"}`, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + v := url.Values{} + v.Add("query", tc.qs) + v.Add("start", time2string(tc.start)) + v.Add("end", time2string(tc.end)) + code, body := 
testGET("/api/v1/query_range?" + v.Encode()) + assert.Equal(t, tc.wantCode, code) + assert.Equal(t, tc.wantBody, body) + }) + } +} + +func time2string(t time.Time) string { + return fmt.Sprintf("%d", t.Unix()) +} diff --git a/handler/targets.go b/handler/targets.go new file mode 100644 index 0000000..a9d2db9 --- /dev/null +++ b/handler/targets.go @@ -0,0 +1,31 @@ +package handler + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +func (h *Handler) Target(c *gin.Context) { + activeTargets := []gin.H{} + for _, target := range h.fileService.ListTargets() { + labelKey := "__meta_kubernetes_node_name" + if target.LogType == "pod" { + labelKey = "__meta_kubernetes_namespace" + } + activeTargets = append(activeTargets, gin.H{ + "lastScrape": target.LastForward, + "health": "up", + "discoveredLabels": gin.H{ + "job": target.LogType, + labelKey: target.Target, + }, + }) + } + c.JSON(http.StatusOK, gin.H{ + "status": "success", + "data": gin.H{ + "activeTargets": activeTargets, + }, + }) +} diff --git a/handler/targets_test.go b/handler/targets_test.go new file mode 100644 index 0000000..607394c --- /dev/null +++ b/handler/targets_test.go @@ -0,0 +1,17 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTarget(t *testing.T) { + code, body := testGET("/api/v1/targets") + assert.Equal(t, 200, code) + assert.JSONEq(t, `{"data":{"activeTargets":[ + {"discoveredLabels":{"__meta_kubernetes_node_name":"node01","job":"node"},"health":"up","lastScrape":"2009-11-10T23:00:00Z"}, + {"discoveredLabels":{"__meta_kubernetes_node_name":"node02","job":"node"},"health":"up","lastScrape":"2009-11-10T21:58:00Z"}, + {"discoveredLabels":{"__meta_kubernetes_namespace":"namespace01","job":"pod"},"health":"up","lastScrape":"2009-11-10T23:00:00Z"}, + {"discoveredLabels":{"__meta_kubernetes_namespace":"namespace02","job":"pod"},"health":"up","lastScrape":"2009-11-10T22:58:00Z"}]},"status":"success"}`, body) +} diff --git 
a/handlers/metadata.go b/handlers/metadata.go deleted file mode 100644 index 362c7e2..0000000 --- a/handlers/metadata.go +++ /dev/null @@ -1,62 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - - "github.com/kuoss/lethe/logs/rotator" - - "github.com/gin-gonic/gin" - "github.com/kuoss/lethe/util" - "github.com/thoas/go-funk" -) - -type LetheHandler struct { - *rotator.Rotator -} - -func (lh *LetheHandler) Metadata(c *gin.Context) { - dirs := lh.ListDirs() - targets := funk.Map(dirs, func(x string) string { - typ := util.SubstrBefore(x, "/") - value := util.SubstrAfter(x, "/") - var key string - switch typ { - case "pod": - key = "namespace" - case "node": - key = "node" - } - return fmt.Sprintf(`%s{%s="%s"}`, typ, key, value) - }) - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "targets": targets, - }, - }) -} - -func (lh *LetheHandler) Target(c *gin.Context) { - activeTargets := []gin.H{} - for _, target := range lh.ListTargets() { - labelKey := "__meta_kubernetes_node_name" - if target.LogType == "pod" { - labelKey = "__meta_kubernetes_namespace" - } - activeTargets = append(activeTargets, gin.H{ - "lastScrape": target.LastForward, - "health": "up", - "discoveredLabels": gin.H{ - "job": target.LogType, - labelKey: target.Target, - }, - }) - } - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "activeTargets": activeTargets, - }, - }) -} diff --git a/handlers/query.go b/handlers/query.go deleted file mode 100644 index 1c484ea..0000000 --- a/handlers/query.go +++ /dev/null @@ -1,154 +0,0 @@ -package handlers - -import ( - "fmt" - "log" - "net/http" - - "github.com/gin-gonic/gin" - "github.com/kuoss/lethe/letheql" - "github.com/kuoss/lethe/util" -) - -func (lh *LetheHandler) Query(c *gin.Context) { - query := c.Query("query") - logFormat := c.Query("logFormat") - log.Println("QueryHandler", "query=", query) - - if query == "" { - c.JSON(http.StatusBadRequest, gin.H{ - "status": "error", - "error": "empty 
query", - }) - return - } - - queryData, err := letheql.ProcQuery(query, letheql.TimeRange{}) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{ - "status": "error", - "error": fmt.Sprintf("%s", err), - }) - return - } - - if queryData.ResultType == letheql.ValueTypeLogs { - if logFormat == "json" { - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "resultType": "logs", - "result": queryData.Logs, - }, - }) - return - } - var stringLogs []string - for _, logLine := range queryData.Logs { - stringLogs = append(stringLogs, logLine.CompactRaw()) - } - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "resultType": "logs", - "result": stringLogs, - }, - }) - return - } - - if queryData.Scalar == 0 { - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "resultType": "vector", - "result": []int{}, - }, - }) - return - } - - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "resultType": "vector", - "result": []gin.H{ - { - "value": queryData.Scalar, - }, - }, - }, - }) -} - -func (lh *LetheHandler) QueryRange(c *gin.Context) { - query := c.Query("query") - logFormat := c.Query("logFormat") - start := c.Query("start") - end := c.Query("end") - - log.Println("query_range...", query, start, end) - if query == "" || start == "" || end == "" { - c.JSON(http.StatusBadRequest, gin.H{ - "status": "error", - "error": "empty query", - }) - return - } - startTime := util.FloatStringToTime(start) - endTime := util.FloatStringToTime(end) - queryData, err := letheql.ProcQuery(query, letheql.TimeRange{Start: startTime, End: endTime}) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{ - "status": "error", - "error": fmt.Sprintf("%s", err), - }) - return - } - fmt.Println("queryData.ResultType=", queryData.ResultType) - if queryData.ResultType == letheql.ValueTypeLogs { - if logFormat == "json" { - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "resultType": "logs", - 
"result": queryData.Logs, - }, - }) - return - } - var stringLogs []string - for _, logLine := range queryData.Logs { - stringLogs = append(stringLogs, logLine.CompactRaw()) - } - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "resultType": "logs", - "result": stringLogs, - }, - }) - return - } - if queryData.Scalar == 0 { - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "resultType": "vector", - "result": []int{}, - }, - }) - return - } - c.JSON(http.StatusOK, gin.H{ - "status": "success", - "data": gin.H{ - "resultType": "vector", - "result": []gin.H{ - { - "value": queryData.Scalar, - }, - }, - }, - }) -} diff --git a/letheql/engine.go b/letheql/engine.go index be45ce1..e7cff4e 100644 --- a/letheql/engine.go +++ b/letheql/engine.go @@ -1,58 +1,332 @@ package letheql import ( - "strings" + "context" + "errors" + "fmt" + "math" + "time" - // "github.com/VictoriaMetrics/metricsql" - "github.com/kuoss/lethe/logs/filter" - "github.com/pkg/errors" + "github.com/kuoss/common/logger" + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/letheql/parser" + "github.com/kuoss/lethe/storage/logservice" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/storage" ) type Engine struct { + logService *logservice.LogService } -// NewQuery return Query that including filter, keyword, engine -func (e *Engine) newQuery(queryString string) (*query, error) { +func NewEngine(logService *logservice.LogService) *Engine { + return &Engine{logService} +} + +func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, qs string, ts time.Time) (Query, error) { + expr, err := parser.ParseExpr(qs) + if err != nil { + return nil, err + } + qry, err := ng.newQuery(q, expr, ts, ts, 0) + if err != nil { + return nil, err + } + qry.q = qs + + return qry, nil +} - ok, filterType, err := filter.IsFilterExist(queryString) +func (ng *Engine) NewRangeQuery(_ context.Context, q 
storage.Queryable, qs string, start, end time.Time, interval time.Duration) (Query, error) { + logger.Infof("newRangeQuery qs: %s", qs) + expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } + qry, err := ng.newQuery(q, expr, start, end, interval) + if err != nil { + return nil, err + } + qry.q = qs + + return qry, nil +} + +func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) { + es := &parser.EvalStmt{ + Expr: PreprocessExpr(expr, start, end), + Start: start, + End: end, + Interval: interval, + } + qry := &query{ + stmt: es, + ng: ng, + queryable: q, + } + return qry, nil +} + +func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws model.Warnings, err error) { + ctx, cancel := context.WithTimeout(ctx, ng.logService.Config().Timeout()) + q.cancel = cancel + defer q.cancel() + switch s := q.Statement().(type) { + case *parser.EvalStmt: + return ng.execEvalStmt(ctx, q, s) + case parser.TestStmt: + return nil, nil, s(ctx) + } + panic(fmt.Errorf("letheql.exec: unhandled statement of type %T", q.Statement())) +} + +func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, model.Warnings, error) { + mint, maxt := ng.findMinMaxTime(s) + querier, err := query.queryable.Querier(ctx, mint, maxt) + if err != nil { + return nil, nil, fmt.Errorf("querier err: %w", err) + } + defer querier.Close() - var parsableQuery, keyword string - var f filter.Filter + // Range evaluation. 
+ evaluator := &evaluator{ + logService: ng.logService, + start: s.Start, + end: s.End, + startTimestamp: timeMilliseconds(s.Start), + endTimestamp: timeMilliseconds(s.End), + interval: durationMilliseconds(s.Interval), + ctx: ctx, + } + val, warnings, err := evaluator.Eval(s.Expr) + if err != nil { + return nil, warnings, err + } + switch result := val.(type) { + case model.Log: + return result, warnings, nil + case String: + return result, warnings, nil + default: + panic(fmt.Errorf("promql.Engine.exec: invalid expression type %q", val.Type())) + } +} +func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { + var minTimestamp, maxTimestamp int64 = math.MaxInt64, math.MinInt64 + // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. + // The evaluation of the VectorSelector inside then evaluates the given range and unsets + // the variable. - if ok { - parts := strings.Split(queryString, filterType) - parsableQuery = strings.TrimSpace(parts[0]) - keyword = strings.TrimSpace(parts[1]) - filterFromQuery, err := filter.FromQuery(queryString) - if err != nil { - return nil, err + var evalRange time.Duration + parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { + switch n := node.(type) { + case *parser.VectorSelector: + start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) + if start < minTimestamp { + minTimestamp = start + } + if end > maxTimestamp { + maxTimestamp = end + } + evalRange = 0 + case *parser.MatrixSelector: + evalRange = n.Range } - f = filterFromQuery + return nil + }) + + if maxTimestamp == math.MinInt64 { + // This happens when there was no selector. Hence no time range to select. 
+ minTimestamp = 0 + maxTimestamp = 0 + } + + return minTimestamp, maxTimestamp +} +func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) { + start, end := timestamp.FromTime(s.Start), timestamp.FromTime(s.End) + subqOffset, subqRange, subqTs := subqueryTimes(path) + + if subqTs != nil { + // The timestamp on the subquery overrides the eval statement time ranges. + start = *subqTs + end = *subqTs + } + + if n.Timestamp != nil { + // The timestamp on the selector overrides everything. + start = *n.Timestamp + end = *n.Timestamp + } else { + offsetMilliseconds := durationMilliseconds(subqOffset) + start = start - offsetMilliseconds - durationMilliseconds(subqRange) + end -= offsetMilliseconds + } + + if evalRange == 0 { + start -= durationMilliseconds(s.LookbackDelta) } else { - parsableQuery = queryString - keyword = "" - } - - if len(queryString) < 1 { - return nil, errors.New("empty queryString") - } - return &query{ - q: parsableQuery, - filter: f, - keyword: keyword, - engine: e, - }, nil -} - -// func (e *Engine) parseQuery(q *query) { -// _, err := metricsql.Parse(q.q) -// if err != nil { -// return -// } -// } -// func (e *Engine) exec(q *query) { -// q.Exec() -// } + // For all matrix queries we want to ensure that we have (end-start) + range selected + // this way we have `range` data before the start time + start -= durationMilliseconds(evalRange) + } + + offsetMilliseconds := durationMilliseconds(n.OriginalOffset) + start -= offsetMilliseconds + end -= offsetMilliseconds + + return start, end +} + +func contextDone(ctx context.Context, env string) error { + if err := ctx.Err(); err != nil { + return contextErr(err, env) + } + return nil +} + +func contextErr(err error, env string) error { + switch { + case errors.Is(err, context.Canceled): + return model.ErrQueryCanceled(env) + case errors.Is(err, context.DeadlineExceeded): + return model.ErrQueryTimeout(env) + 
default: + return err + } +} + +func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr { + isStepInvariant := preprocessExprHelper(expr, start, end) + if isStepInvariant { + return newStepInvariantExpr(expr) + } + return expr +} + +func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { + switch n := expr.(type) { + case *parser.VectorSelector: + switch n.StartOrEnd { + case parser.START: + n.Timestamp = makeInt64Pointer(timestamp.FromTime(start)) + case parser.END: + n.Timestamp = makeInt64Pointer(timestamp.FromTime(end)) + } + return n.Timestamp != nil + + case *parser.AggregateExpr: + return preprocessExprHelper(n.Expr, start, end) + + case *parser.BinaryExpr: + isInvariant1, isInvariant2 := preprocessExprHelper(n.LHS, start, end), preprocessExprHelper(n.RHS, start, end) + if isInvariant1 && isInvariant2 { + return true + } + + if isInvariant1 { + n.LHS = newStepInvariantExpr(n.LHS) + } + if isInvariant2 { + n.RHS = newStepInvariantExpr(n.RHS) + } + + return false + + case *parser.Call: + _, ok := AtModifierUnsafeFunctions[n.Func.Name] + isStepInvariant := !ok + isStepInvariantSlice := make([]bool, len(n.Args)) + for i := range n.Args { + isStepInvariantSlice[i] = preprocessExprHelper(n.Args[i], start, end) + isStepInvariant = isStepInvariant && isStepInvariantSlice[i] + } + + if isStepInvariant { + // The function and all arguments are step invariant. + return true + } + + for i, isi := range isStepInvariantSlice { + if isi { + n.Args[i] = newStepInvariantExpr(n.Args[i]) + } + } + return false + + case *parser.MatrixSelector: + return preprocessExprHelper(n.VectorSelector, start, end) + + case *parser.SubqueryExpr: + // Since we adjust offset for the @ modifier evaluation, + // it gets tricky to adjust it for every subquery step. + // Hence we wrap the inside of subquery irrespective of + // @ on subquery (given it is also step invariant) so that + // it is evaluated only once w.r.t. the start time of subquery. 
+ isInvariant := preprocessExprHelper(n.Expr, start, end) + if isInvariant { + n.Expr = newStepInvariantExpr(n.Expr) + } + switch n.StartOrEnd { + case parser.START: + n.Timestamp = makeInt64Pointer(timestamp.FromTime(start)) + case parser.END: + n.Timestamp = makeInt64Pointer(timestamp.FromTime(end)) + } + return n.Timestamp != nil + + case *parser.ParenExpr: + return preprocessExprHelper(n.Expr, start, end) + + case *parser.UnaryExpr: + return preprocessExprHelper(n.Expr, start, end) + + case *parser.StringLiteral, *parser.NumberLiteral: + return true + } + + panic(fmt.Sprintf("found unexpected node %#v", expr)) +} + +func makeInt64Pointer(val int64) *int64 { + valp := new(int64) + *valp = val + return valp +} + +func newStepInvariantExpr(expr parser.Expr) parser.Expr { + return &parser.StepInvariantExpr{Expr: expr} +} + +func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { + var ( + subqOffset, subqRange time.Duration + ts int64 = math.MaxInt64 + ) + for _, node := range path { + if n, ok := node.(*parser.SubqueryExpr); ok { + subqOffset += n.OriginalOffset + subqRange += n.Range + if n.Timestamp != nil { + // The @ modifier on subquery invalidates all the offset and + // range till now. Hence resetting it here. 
+ subqOffset = n.OriginalOffset + subqRange = n.Range + ts = *n.Timestamp + } + } + } + var tsp *int64 + if ts != math.MaxInt64 { + tsp = &ts + } + return subqOffset, subqRange, tsp +} + +func timeMilliseconds(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +func durationMilliseconds(d time.Duration) int64 { + return int64(d / (time.Millisecond / time.Nanosecond)) +} diff --git a/letheql/engine_test.go b/letheql/engine_test.go new file mode 100644 index 0000000..160bd1e --- /dev/null +++ b/letheql/engine_test.go @@ -0,0 +1,138 @@ +package letheql + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/storage/logservice/logmodel" + "github.com/kuoss/lethe/storage/querier" + "github.com/prometheus/prometheus/storage" + "github.com/stretchr/testify/assert" +) + +func TestNewEngine(t *testing.T) { + assert.NotNil(t, engine1) +} + +func TestNewInstantQuery(t *testing.T) { + + t.Run("QueryableFunc", func(t *testing.T) { + queryable := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return nil, nil + }) + ctx, cancelCtx := context.WithCancel(context.Background()) + defer cancelCtx() + + qry, err := engine1.NewInstantQuery(ctx, queryable, "pod", time.Unix(1, 0)) + assert.NoError(t, err) + assert.NotEmpty(t, qry) + }) + + t.Run("LetheQueryable", func(t *testing.T) { + // LetheQueryable + queryable := &querier.LetheQueryable{LetheQuerier: &querier.LetheQuerier{}} + testCases := []struct { + qs string + wantError string + want *Result + }{ + { + `pod`, + "getTargets err: target matcher err: not found label 'namespace' for logType 'pod'", + &Result{}, + }, + { + `pod{namespace="namespace01"}`, + "", + &Result{Value: model.Log{Name: "pod", Lines: []model.LogLine{}}, Warnings: model.Warnings(nil)}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + 
qry, err := engine1.NewInstantQuery(context.TODO(), queryable, tc.qs, clock.Now()) + assert.NoError(t, err) + got := qry.Exec(context.TODO()) + err = got.Err + got.Err = nil + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + assert.Equal(t, tc.want, got) + }) + } + }) +} + +func TestNewRangeQuery(t *testing.T) { + + ago10d := clock.Now().Add(-240 * time.Hour) + ago2m := clock.Now().Add(-2 * time.Minute) + now := clock.Now() + + queryable := &querier.LetheQueryable{LetheQuerier: &querier.LetheQuerier{}} + testCases := []struct { + qs string + start time.Time + end time.Time + wantError string + want *Result + }{ + { + `pod`, ago10d, now, + "getTargets err: target matcher err: not found label 'namespace' for logType 'pod'", + &Result{}, + }, + { + `pod{namespace="namespace01"}`, ago2m, now, + "", + &Result{Err: error(nil), Value: model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "lerom ipsum"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "lerom from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}}}, Warnings: model.Warnings(nil)}, + }, + { + `pod{namespace="namespace01"}`, ago10d, now, + "", + &Result{Err: error(nil), Value: model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: 
"2009-11-10T21:00:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T21:01:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T21:02:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "lerom ipsum"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "lerom from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", 
Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}}}, Warnings: model.Warnings(nil)}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + ctx := context.TODO() + qry, err := engine1.NewRangeQuery(ctx, queryable, tc.qs, tc.start, tc.end, 0) + assert.NoError(t, err) + got := qry.Exec(ctx) + err = got.Err + got.Err = nil + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + assert.Equal(t, tc.want, got) + }) + } + +} diff --git a/letheql/evaluator.go b/letheql/evaluator.go new file mode 100644 index 0000000..475e9c5 --- /dev/null +++ b/letheql/evaluator.go @@ -0,0 +1,153 @@ +package letheql + +import ( + "context" + "fmt" + "runtime" + "time" + + "github.com/kuoss/common/logger" + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/letheql/parser" + "github.com/kuoss/lethe/storage/logservice" +) + +type evaluator struct { + logService *logservice.LogService + + ctx context.Context + + startTimestamp int64 // Start time in milliseconds. + endTimestamp int64 // End time in milliseconds. + interval int64 // Interval in milliseconds. + + start time.Time + end time.Time +} + +func (ev *evaluator) error(err error) { + panic(err) +} + +func (ev *evaluator) recover(expr parser.Expr, ws *model.Warnings, errp *error) { + e := recover() + if e == nil { + return + } + + switch err := e.(type) { + case runtime.Error: + buf := make([]byte, 64<<10) + buf = buf[:runtime.Stack(buf, false)] + + logger.Errorf("msg: runtime panic in parser. expr: %s, err: %s, stacktrace: %s", expr.String(), e, string(buf)) + *errp = fmt.Errorf("unexpected error: %w", err) + case model.ErrWithWarnings: + *errp = err.Err + *ws = append(*ws, err.Warnings...) 
+ case error: + *errp = err + default: + *errp = fmt.Errorf("%v", err) + } +} + +func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws model.Warnings, err error) { + defer ev.recover(expr, &ws, &err) + val, ws := ev.eval(expr) + + ls, ok := val.(*model.LogSelector) + if ok { + val, ws = ev.evalWithWarnings(ls, &ws) + } + return val, ws, nil +} + +func (ev *evaluator) eval(expr parser.Expr) (parser.Value, model.Warnings) { + + if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + ev.error(err) + } + + switch e := expr.(type) { + case *parser.BinaryExpr: + return ev.evalBinaryExpr(e) + + case *parser.StringLiteral: + return String{V: e.Val, T: ev.startTimestamp}, nil + + case *parser.VectorSelector: + return ev.vectorSelector(e) + + case *model.LogSelector: + return ev.logSelector(e) + + } + panic(fmt.Errorf("eval: unhandled expr: %#v", expr)) +} + +func (ev *evaluator) evalWithWarnings(expr parser.Expr, warnings *model.Warnings) (parser.Value, model.Warnings) { + val, ws := ev.eval(expr) + *warnings = append(*warnings, ws...) 
+ return val, *warnings +} + +func (ev *evaluator) vectorSelector(vs *parser.VectorSelector) (*model.LogSelector, model.Warnings) { + return &model.LogSelector{ + Name: vs.Name, + LabelMatchers: vs.LabelMatchers, + TimeRange: model.TimeRange{Start: ev.start, End: ev.end}, + }, nil +} + +func (ev *evaluator) logSelector(ls *model.LogSelector) (parser.Value, model.Warnings) { + val, ws, err := ev.logService.SelectLog(ls) + if err != nil { + ev.error(err) + } + return val, ws +} + +func (ev *evaluator) evalBinaryExpr(expr *parser.BinaryExpr) (parser.Value, model.Warnings) { + + // currently we can handle 'filter operator + string' form only + if !expr.Op.IsFilterOperator() { + ev.error(fmt.Errorf("evalBinaryExpr err: not filter operator: %s", expr.Op)) + } + + switch lhs := expr.LHS.(type) { + + case *parser.BinaryExpr: + newLHS, warnings := ev.eval(lhs) + switch nl := newLHS.(type) { + case *model.LogSelector: + expr.LHS = nl + return ev.evalWithWarnings(expr, &warnings) + } + + case *parser.VectorSelector: + newLHS, warnings := ev.vectorSelector(lhs) + expr.LHS = newLHS + return ev.evalWithWarnings(expr, &warnings) + + case *model.LogSelector: + switch rhs := expr.RHS.(type) { + case *parser.StringLiteral: + lhs.LineMatchers = append(lhs.LineMatchers, &model.LineMatcher{ + Op: expr.Op, + Value: rhs.Val, + }) + return lhs, nil + case *parser.StepInvariantExpr: + val, ws := ev.eval(rhs.Expr) + lhs.LineMatchers = append(lhs.LineMatchers, &model.LineMatcher{ + Op: expr.Op, + Value: val.String(), + }) + return lhs, ws + default: + ev.error(fmt.Errorf("unknown type rhs: %#v", rhs)) + } + } + panic(fmt.Errorf("evalBinaryExpr unhandles op:[%s], lhs:[%s], rhs:[%s]", expr.Op, expr.LHS, expr.RHS)) +} diff --git a/letheql/evaluator_test.go b/letheql/evaluator_test.go new file mode 100644 index 0000000..975783c --- /dev/null +++ b/letheql/evaluator_test.go @@ -0,0 +1,356 @@ +package letheql + +import ( + "context" + "fmt" + "testing" + "time" + + 
"github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/letheql/parser" + "github.com/kuoss/lethe/storage/logservice" + "github.com/kuoss/lethe/storage/logservice/logmodel" + "github.com/stretchr/testify/assert" +) + +func TestNewEvaluator(t *testing.T) { + logService1 := &logservice.LogService{} + contextBackground := context.Background() + contextTODO := context.TODO() + testCases := []struct { + logService *logservice.LogService + ctx context.Context + startTimestamp int64 + endTimestamp int64 + interval int64 + want evaluator + }{ + { + logService1, contextBackground, 0, 0, 0, + evaluator{logService1, contextBackground, 0, 0, 0, time.Time{}, time.Time{}}, + }, + { + logService1, contextTODO, 0, 0, 0, + evaluator{logService1, contextTODO, 0, 0, 0, time.Time{}, time.Time{}}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got := evaluator{tc.logService, tc.ctx, tc.startTimestamp, tc.endTimestamp, tc.interval, time.Time{}, time.Time{}} + assert.Equal(t, tc.want, got) + }) + } + assert.NotNil(t, evaluator1) +} + +func TestEval(t *testing.T) { + testCases := []struct { + input string + wantParseError string + wantError string + wantWarnings model.Warnings + want parser.Value + }{ + { + ``, + "1:1: parse error: no expression found in input", "", nil, nil, + }, + { + `pod`, + "", "getTargets err: target matcher err: not found label 'namespace' for logType 'pod'", nil, nil, + }, + { + `"hello"`, + "", "", nil, String{T: 0, V: "hello"}, + }, + { + `pod{namespace="hello"}`, + "", "", nil, model.Log{Name: "pod", Lines: []model.LogLine{}}, + }, + { + `pod{namespace|="namespace01"}`, + "1:14: parse error: unexpected character inside braces: '|'", "", nil, nil, + }, + { + `pod{namespace="namespace01"}`, + "", "", nil, + model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: "2009-11-10T21:00:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello 
world"}, + logmodel.PodLog{Time: "2009-11-10T21:01:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T21:02:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "lerom ipsum"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "lerom from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + 
logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}}}, + }, + { + `pod{namespace!="namespace01"}`, + "", "", + model.Warnings{fmt.Errorf("warnMultiTargets: use operator '=' for selecting target")}, + model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "lerom ipsum"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "lerom from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from 
sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}}}, + }, + { + `pod{namespace=~"namespace.*"}`, + "", "", + model.Warnings{fmt.Errorf("warnMultiTargets: use operator '=' for selecting target")}, + model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: "2009-11-10T21:00:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T21:01:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T21:02:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "lerom ipsum"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + 
logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "lerom from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "lerom ipsum"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "lerom from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello 
from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}}}, + }, + { + `pod{namespace="namespace01",pod="nginx-.*"}`, + "", "", nil, + model.Log{Name: "pod", Lines: []model.LogLine{}}, + }, + { + `pod{namespace="namespace01",pod~="nginx-.*"}`, + "1:32: parse error: unexpected character inside braces: '~'", "", nil, nil, + }, + { + `pod{namespace="namespace01",pod=~"nginx-.*"}`, + "", "", nil, + model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: "2009-11-10T21:00:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T21:01:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T21:02:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "lerom ipsum"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: 
"sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "lerom from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}}}, + }, + { + `pod{namespace="namespace01",pod!~"nginx-.*"}`, + "", "", nil, + model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:56:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:57:00.000000Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: "hello from sidecar"}}}, + }, + { + `pod{namespace="namespace01",pod=~"nginx-.*",container="sidecar"}`, + "", "", nil, + model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}, + logmodel.PodLog{Time: 
"2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "lerom from sidecar"}, + logmodel.PodLog{Time: "2009-11-10T22:58:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: "hello from sidecar"}}}, + }, + { + `node`, + "", "getTargets err: target matcher err: not found label 'node' for logType 'node'", nil, nil, + }, + { + `node{namespace="namespace01"}`, + "", "getTargets err: target matcher err: not found label 'node' for logType 'node'", nil, nil, + }, + { + `node{node="node01"}`, + "", "", nil, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "lerom ipsum"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from 
sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "lerom from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}}}, + }, + { + `node{node=~"node.*"}`, + "", "", + model.Warnings{fmt.Errorf("warnMultiTargets: use operator '=' for selecting target")}, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "lerom ipsum"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + 
logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "lerom from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "containerd", Log: "lerom ipsum"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "dockerd", Log: "lerom from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", 
Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}}}, + }, + { + `node{node=~"node.*"}`, + "", "", + model.Warnings{fmt.Errorf("warnMultiTargets: use operator '=' for selecting target")}, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:56:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "lerom ipsum"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:57:00.000000Z", Node: "node01", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "lerom from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", 
Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "containerd", Log: "lerom ipsum"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "dockerd", Log: "lerom from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T21:58:00.000000Z", Node: "node02", Process: "kubelet", Log: "I0525 20:00:45.752587 
17221 scope.go:110] \"RemoveContainer\" hello from sidecar"}}}, + }, + { + `node{node!~"node.*"}`, + "", "", + model.Warnings{fmt.Errorf("warnMultiTargets: use operator '=' for selecting target")}, + model.Log{Name: "node", Lines: []model.LogLine{}}, + }, + { + `node{node="node01",process!="kubelet"}`, + "", "", nil, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "lerom ipsum"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "lerom from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}}}, + }, + { + `node{node="node01",process!="kubelet"} |= "hello"`, + "", "", nil, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}}}, + }, + { + `node{node="node01",process!="kubelet"} |= "hello" |= "sidecar"`, + "", "", nil, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", Node: "node01", Process: "dockerd", Log: "hello from sidecar"}, + logmodel.NodeLog{Time: "2009-11-10T22:58:00.000000Z", 
Node: "node01", Process: "dockerd", Log: "hello from sidecar"}}}, + }, + { + `node{node="node01",process!="kubelet"} |= "hello" != "sidecar"`, + "", "", nil, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}}}, + }, + { + `node{node="node01",process!="kubelet"} |~ "ll.*" !~ "car.*"`, + "", "", nil, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}}}, + }, + { + `node{node="node01",process!="kubelet"} |~ "ll.*" !~ "car.*"`, + "", "", nil, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}}}, + }, + { + `node{node="node01",process!="kubelet"} != "sidecar" |~ "d$" `, + "", "", nil, + model.Log{Name: "node", Lines: []model.LogLine{ + logmodel.NodeLog{Time: "2009-11-10T22:59:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}, + logmodel.NodeLog{Time: "2009-11-10T23:00:00.000000Z", Node: "node01", Process: "containerd", Log: "hello world"}}}, + }, + { + `node{node="node01",process!="kubelet"} != "sidecar" |~ "*" `, + "", "getMatchFuncSet err: getLineMatchFuncs err: getLineMatchFunc err: error parsing regexp: missing argument to repetition operator: `*`", nil, nil, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + var got parser.Value + var ws model.Warnings + var err error + expr, parseErr := parser.ParseExpr(tc.input) + if 
tc.wantParseError != "" { + assert.EqualError(t, parseErr, tc.wantParseError) + } else { + assert.NoError(t, parseErr) + got, ws, err = evaluator1.Eval(expr) + } + if tc.wantError != "" { + assert.EqualError(t, err, tc.wantError) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tc.want, got) + assert.Equal(t, tc.wantWarnings, ws) + }) + } +} diff --git a/letheql/functions.go b/letheql/functions.go new file mode 100644 index 0000000..d850562 --- /dev/null +++ b/letheql/functions.go @@ -0,0 +1,11 @@ +package letheql + +var AtModifierUnsafeFunctions = map[string]struct{}{ + // Step invariant functions. + "days_in_month": {}, "day_of_month": {}, "day_of_week": {}, "day_of_year": {}, + "hour": {}, "minute": {}, "month": {}, "year": {}, + "predict_linear": {}, "time": {}, + // Uses timestamp of the argument for the result, + // hence unsafe to use with @ modifier. + "timestamp": {}, +} diff --git a/letheql/init_test.go b/letheql/init_test.go new file mode 100644 index 0000000..aa6d904 --- /dev/null +++ b/letheql/init_test.go @@ -0,0 +1,47 @@ +package letheql + +import ( + "context" + "time" + + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/config" + _ "github.com/kuoss/lethe/storage/driver/filesystem" + "github.com/kuoss/lethe/storage/fileservice" + "github.com/kuoss/lethe/storage/logservice" + "github.com/kuoss/lethe/util/testutil" +) + +var ( + engine1 *Engine + evaluator1 *evaluator +) + +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + clock.SetPlaygroundMode(true) + + cfg, err := config.New("test") + if err != nil { + panic(err) + } + cfg.SetLogDataPath("tmp/init") + fileService, err := fileservice.New(cfg) + if err != nil { + panic(err) + } + logService := logservice.New(fileService) + engine1 = NewEngine(logService) + + now := clock.Now() + evaluator1 = &evaluator{ + logService: logService, + ctx: context.TODO(), + start: now.Add(-4 * time.Hour), + end: now, + startTimestamp: 0, + endTimestamp: 0, + interval: 0, + } +} diff 
--git a/letheql/model/error.go b/letheql/model/error.go new file mode 100644 index 0000000..3cea7a5 --- /dev/null +++ b/letheql/model/error.go @@ -0,0 +1,38 @@ +package model + +import "fmt" + +type Warnings []error + +type ErrWithWarnings struct { + Err error + Warnings Warnings +} + +type ( + // ErrQueryTimeout is returned if a query timed out during processing. + ErrQueryTimeout string + // ErrQueryCanceled is returned if a query was canceled during processing. + ErrQueryCanceled string + // ErrTooManySamples is returned if a query would load more than the maximum allowed samples into memory. + ErrTooManySamples string + // ErrStorage is returned if an error was encountered in the storage layer + // during query handling. + ErrStorage struct{ Err error } +) + +func (e ErrQueryTimeout) Error() string { + return fmt.Sprintf("query timed out in %s", string(e)) +} + +func (e ErrQueryCanceled) Error() string { + return fmt.Sprintf("query was canceled in %s", string(e)) +} + +func (e ErrTooManySamples) Error() string { + return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e)) +} + +func (e ErrStorage) Error() string { + return e.Err.Error() +} diff --git a/letheql/model/log.go b/letheql/model/log.go new file mode 100644 index 0000000..69b6bb6 --- /dev/null +++ b/letheql/model/log.go @@ -0,0 +1,27 @@ +package model + +import ( + "strings" + + "github.com/kuoss/lethe/letheql/parser" +) + +const ValueTypeLog parser.ValueType = "log" + +type Log struct { + Name string + Lines []LogLine +} + +func (l Log) Type() parser.ValueType { return ValueTypeLog } +func (l Log) String() string { + entries := make([]string, len(l.Lines)) + for i, line := range l.Lines { + entries[i] = line.String() + } + return strings.Join(entries, "\n") +} + +type LogLine interface { + String() string +} diff --git a/letheql/model/selector.go b/letheql/model/selector.go new file mode 100644 index 0000000..ae785b7 --- /dev/null +++ b/letheql/model/selector.go @@ 
-0,0 +1,38 @@ +package model + +import ( + "fmt" + "time" + + "github.com/kuoss/lethe/letheql/parser" + "github.com/prometheus/prometheus/model/labels" +) + +type TimeRange struct { + Start time.Time + End time.Time +} + +type LineMatcher struct { + Op parser.ItemType + Value string +} + +// LogSelector represents a Log selection. +type LogSelector struct { + Name string + LabelMatchers []*labels.Matcher + LineMatchers []*LineMatcher + TimeRange TimeRange +} + +const ValueTypeLogSelector parser.ValueType = "logselector" + +// implements parser.Expr +func (ls LogSelector) PromQLExpr() {} +func (ls LogSelector) Pretty(int) string { return ls.String() } +func (ls LogSelector) PositionRange() parser.PositionRange { return parser.PositionRange{} } + +// implements parser.Value +func (ls LogSelector) Type() parser.ValueType { return ValueTypeLogSelector } +func (ls LogSelector) String() string { return fmt.Sprintf("%#v", ls) } diff --git a/letheql/parser/lex.go b/letheql/parser/lex.go index bf2e19d..dbd3f21 100644 --- a/letheql/parser/lex.go +++ b/letheql/parser/lex.go @@ -83,7 +83,7 @@ func (i ItemType) IsComparisonOperator() bool { func (i ItemType) IsFilterOperator() bool { switch i { - case NEQ_REGEX, PIPE_EQL, PIPE_REGEX: + case NEQ, NEQ_REGEX, PIPE_EQL, PIPE_REGEX: return true default: return false @@ -158,20 +158,22 @@ var ItemTypeStr = map[ItemType]string{ TIMES: "x", SPACE: "", - SUB: "-", - ADD: "+", - MUL: "*", - MOD: "%", - DIV: "/", - EQLC: "==", - NEQ: "!=", - LTE: "<=", - LSS: "<", - GTE: ">=", - GTR: ">", - EQL_REGEX: "=~", - NEQ_REGEX: "!~", - POW: "^", + SUB: "-", + ADD: "+", + MUL: "*", + MOD: "%", + DIV: "/", + EQLC: "==", + NEQ: "!=", + LTE: "<=", + LSS: "<", + GTE: ">=", + GTR: ">", + EQL_REGEX: "=~", + NEQ_REGEX: "!~", + PIPE_EQL: "|=", + PIPE_REGEX: "|~", + POW: "^", } func init() { diff --git a/letheql/parser/parser_inner_test.go b/letheql/parser/parser_inner_test.go index 88eb4b2..490af29 100644 --- a/letheql/parser/parser_inner_test.go +++ 
b/letheql/parser/parser_inner_test.go @@ -1,9 +1,9 @@ package parser import ( + "fmt" "testing" - "github.com/kuoss/lethe/testutil" commonModel "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" @@ -11,13 +11,13 @@ import ( func TestParser(t *testing.T) { - testCases := map[string]struct { + testCases := []struct { input string wantError string want Expr }{ // BinaryExpr - single FilterOperator - testutil.TC(): { + { `pod|="hello"`, "", &BinaryExpr{ @@ -30,7 +30,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 5, End: 12}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod|~"hel.*"`, "", &BinaryExpr{ @@ -43,7 +43,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 5, End: 12}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod!="hello"`, "", &BinaryExpr{ @@ -56,7 +56,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 5, End: 12}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod!~"hel.*"`, "", &BinaryExpr{ @@ -69,7 +69,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 5, End: 12}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod |= "hello"`, "", &BinaryExpr{ @@ -82,7 +82,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 7, End: 14}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod{} |= "hello"`, "", &BinaryExpr{ @@ -97,7 +97,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 9, End: 16}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod{} |~ "hello.*"`, "", &BinaryExpr{ @@ -110,7 +110,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 9, End: 18}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod{namespace="namespace01"} |= "hello"`, "", &BinaryExpr{ @@ -125,7 +125,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 32, End: 39}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod{namespace="namespace01"} |~ "hel.*"`, "", &BinaryExpr{ @@ -141,7 
+141,7 @@ func TestParser(t *testing.T) { ReturnBool: false}, }, // BinaryExpr - multi FilterOperator (nested) - testutil.TC(): { + { `pod|="hello"!="world"`, "", &BinaryExpr{ @@ -160,7 +160,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 14, End: 21}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod|~"hel.*"|="world"`, "", &BinaryExpr{ @@ -179,7 +179,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 14, End: 21}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod|~"hel.*"!~"wor.*"`, "", &BinaryExpr{ @@ -200,7 +200,7 @@ func TestParser(t *testing.T) { }, // NumberLiteral - testutil.TC(): { + { `42`, "", &NumberLiteral{ @@ -208,7 +208,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 0, End: 2}}, }, - testutil.TC(): { + { `"hello"`, "", &StringLiteral{ @@ -217,7 +217,7 @@ func TestParser(t *testing.T) { }, // VectorSelector - testutil.TC(): { + { `pod`, "", &VectorSelector{ @@ -226,7 +226,7 @@ func TestParser(t *testing.T) { MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: PositionRange{Start: 0, End: 3}}, }, - testutil.TC(): { + { `pod{}`, "", &VectorSelector{ @@ -235,7 +235,7 @@ func TestParser(t *testing.T) { MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: PositionRange{Start: 0, End: 5}}, }, - testutil.TC(): { + { `pod{namespace="namespace01"}`, "", &VectorSelector{ @@ -245,7 +245,7 @@ func TestParser(t *testing.T) { MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: PositionRange{Start: 0, End: 28}}, }, - testutil.TC(): { + { `pod{namespace="not-exists"}`, "", &VectorSelector{ @@ -255,7 +255,7 @@ func TestParser(t *testing.T) { MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: PositionRange{Start: 0, End: 27}}, }, - testutil.TC(): { + { `pod{namespace="namespace01",pod="nginx"}`, "", &VectorSelector{ @@ -266,7 +266,7 @@ func TestParser(t *testing.T) { 
MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: PositionRange{Start: 0, End: 40}}, }, - testutil.TC(): { + { `pod{namespace="namespace01",pod="nginx-*"}`, "", &VectorSelector{ @@ -277,7 +277,7 @@ func TestParser(t *testing.T) { MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: PositionRange{Start: 0, End: 42}}, }, - testutil.TC(): { + { `pod{namespace="namespace01",container="nginx"}`, "", &VectorSelector{ @@ -288,7 +288,7 @@ func TestParser(t *testing.T) { MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: PositionRange{Start: 0, End: 46}}, }, - testutil.TC(): { + { `pod{namespace="namespace*",container="nginx"}`, "", &VectorSelector{ @@ -301,7 +301,7 @@ func TestParser(t *testing.T) { }, // MatrixSelector - testutil.TC(): { + { `pod{namespace="namespace01",pod="nginx-*"}[3m]`, "", &MatrixSelector{ @@ -316,7 +316,7 @@ func TestParser(t *testing.T) { }, // Call - testutil.TC(): { + { `count_over_time(pod{namespace="namespace01",pod="nginx-*"}[3m])`, "", &Call{ @@ -336,7 +336,7 @@ func TestParser(t *testing.T) { Range: 180000000000, EndPos: 62}}, PosRange: PositionRange{Start: 0, End: 63}}, }, - testutil.TC(): { + { `count_over_time(pod{}[3m])`, "", &Call{ @@ -355,7 +355,7 @@ func TestParser(t *testing.T) { }, // BinaryExpr - testutil.TC(): { + { `count_over_time(pod{}[3m]) > 10`, "", &BinaryExpr{ @@ -377,7 +377,7 @@ func TestParser(t *testing.T) { Val: 10, PosRange: PositionRange{Start: 29, End: 31}}}, }, - testutil.TC(): { + { `count_over_time(pod{}[3m]) < 10`, "", &BinaryExpr{ @@ -399,7 +399,7 @@ func TestParser(t *testing.T) { Val: 10, PosRange: PositionRange{Start: 29, End: 31}}}, }, - testutil.TC(): { + { `count_over_time(pod{}[3m]) == 21`, "", &BinaryExpr{ @@ -425,7 +425,7 @@ func TestParser(t *testing.T) { }, // ######## ERROR - testutil.TC(): { + { `pod{namespace="namespace01"} "`, "1:30: parse error: unterminated quoted string", &VectorSelector{ @@ -436,7 
+436,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 0, End: 28}, }, }, - testutil.TC(): { + { `pod{namespace="namespace01"} hello`, "1:30: parse error: unexpected identifier \"hello\"", &VectorSelector{ @@ -447,7 +447,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 0, End: 28}, }, }, - testutil.TC(): { + { `pod{namespace="namespace01"} "hello`, "1:30: parse error: unterminated quoted string", &VectorSelector{ @@ -458,7 +458,7 @@ func TestParser(t *testing.T) { PosRange: PositionRange{Start: 0, End: 28}, }, }, - testutil.TC(): { + { `pod{namespace="namespace01"} "hello"`, "1:30: parse error: unexpected string \"\\\"hello\\\"\"", &VectorSelector{ @@ -470,8 +470,8 @@ func TestParser(t *testing.T) { }, }, } - for name, tc := range testCases { - t.Run(name+" "+tc.input, func(t *testing.T) { + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { expr, err := ParseExpr(tc.input) if tc.wantError == "" { assert.NoError(t, err) diff --git a/letheql/parser/parser_outer_test.go b/letheql/parser/parser_outer_test.go index fd9f2df..9316cc7 100644 --- a/letheql/parser/parser_outer_test.go +++ b/letheql/parser/parser_outer_test.go @@ -1,10 +1,10 @@ package parser_test import ( + "fmt" "testing" "github.com/kuoss/lethe/letheql/parser" - "github.com/kuoss/lethe/testutil" commonModel "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" @@ -12,13 +12,13 @@ import ( func TestParser(t *testing.T) { - testCases := map[string]struct { + testCases := []struct { input string wantError string want parser.Expr }{ // BinaryExpr - single FilterOperator - testutil.TC(): { + { `pod|="hello"`, "", &parser.BinaryExpr{ @@ -31,7 +31,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 5, End: 12}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod|~"hel.*"`, "", &parser.BinaryExpr{ @@ -44,7 +44,7 @@ func TestParser(t *testing.T) { PosRange: 
parser.PositionRange{Start: 5, End: 12}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod!="hello"`, "", &parser.BinaryExpr{ @@ -57,7 +57,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 5, End: 12}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod!~"hel.*"`, "", &parser.BinaryExpr{ @@ -70,7 +70,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 5, End: 12}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod |= "hello"`, "", &parser.BinaryExpr{ @@ -83,7 +83,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 7, End: 14}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod{} |= "hello"`, "", &parser.BinaryExpr{ @@ -98,7 +98,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 9, End: 16}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod{} |~ "hello.*"`, "", &parser.BinaryExpr{ @@ -111,7 +111,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 9, End: 18}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod{namespace="namespace01"} |= "hello"`, "", &parser.BinaryExpr{ @@ -126,7 +126,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 32, End: 39}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod{namespace="namespace01"} |~ "hel.*"`, "", &parser.BinaryExpr{ @@ -142,7 +142,7 @@ func TestParser(t *testing.T) { ReturnBool: false}, }, // BinaryExpr - multi FilterOperator (nested) - testutil.TC(): { + { `pod|="hello"!="world"`, "", &parser.BinaryExpr{ @@ -161,7 +161,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 14, End: 21}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod|~"hel.*"|="world"`, "", &parser.BinaryExpr{ @@ -180,7 +180,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 14, End: 21}}, ReturnBool: false}, }, - testutil.TC(): { + { `pod|~"hel.*"!~"wor.*"`, "", &parser.BinaryExpr{ @@ -201,7 +201,7 @@ func TestParser(t *testing.T) { }, // NumberLiteral - 
testutil.TC(): { + { `42`, "", &parser.NumberLiteral{ @@ -209,7 +209,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 0, End: 2}}, }, - testutil.TC(): { + { `"hello"`, "", &parser.StringLiteral{ @@ -218,7 +218,7 @@ func TestParser(t *testing.T) { }, // VectorSelector - testutil.TC(): { + { `pod`, "", &parser.VectorSelector{ @@ -227,7 +227,7 @@ func TestParser(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: parser.PositionRange{Start: 0, End: 3}}, }, - testutil.TC(): { + { `pod{}`, "", &parser.VectorSelector{ @@ -236,7 +236,7 @@ func TestParser(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: parser.PositionRange{Start: 0, End: 5}}, }, - testutil.TC(): { + { `pod{namespace="namespace01"}`, "", &parser.VectorSelector{ @@ -246,7 +246,7 @@ func TestParser(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: parser.PositionRange{Start: 0, End: 28}}, }, - testutil.TC(): { + { `pod{namespace="not-exists"}`, "", &parser.VectorSelector{ @@ -256,7 +256,7 @@ func TestParser(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: parser.PositionRange{Start: 0, End: 27}}, }, - testutil.TC(): { + { `pod{namespace="namespace01",pod="nginx"}`, "", &parser.VectorSelector{ @@ -267,7 +267,7 @@ func TestParser(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: parser.PositionRange{Start: 0, End: 40}}, }, - testutil.TC(): { + { `pod{namespace="namespace01",pod="nginx-*"}`, "", &parser.VectorSelector{ @@ -278,7 +278,7 @@ func TestParser(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: parser.PositionRange{Start: 0, End: 42}}, }, - testutil.TC(): { + { `pod{namespace="namespace01",container="nginx"}`, "", &parser.VectorSelector{ @@ 
-289,7 +289,7 @@ func TestParser(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, commonModel.MetricNameLabel, "pod")}, PosRange: parser.PositionRange{Start: 0, End: 46}}, }, - testutil.TC(): { + { `pod{namespace="namespace*",container="nginx"}`, "", &parser.VectorSelector{ @@ -302,7 +302,7 @@ func TestParser(t *testing.T) { }, // MatrixSelector - testutil.TC(): { + { `pod{namespace="namespace01",pod="nginx-*"}[3m]`, "", &parser.MatrixSelector{ @@ -317,7 +317,7 @@ func TestParser(t *testing.T) { }, // Call - testutil.TC(): { + { `count_over_time(pod{namespace="namespace01",pod="nginx-*"}[3m])`, "", &parser.Call{ @@ -337,7 +337,7 @@ func TestParser(t *testing.T) { Range: 180000000000, EndPos: 62}}, PosRange: parser.PositionRange{Start: 0, End: 63}}, }, - testutil.TC(): { + { `count_over_time(pod{}[3m])`, "", &parser.Call{ @@ -356,7 +356,7 @@ func TestParser(t *testing.T) { }, // BinaryExpr - testutil.TC(): { + { `count_over_time(pod{}[3m]) > 10`, "", &parser.BinaryExpr{ @@ -378,7 +378,7 @@ func TestParser(t *testing.T) { Val: 10, PosRange: parser.PositionRange{Start: 29, End: 31}}}, }, - testutil.TC(): { + { `count_over_time(pod{}[3m]) < 10`, "", &parser.BinaryExpr{ @@ -400,7 +400,7 @@ func TestParser(t *testing.T) { Val: 10, PosRange: parser.PositionRange{Start: 29, End: 31}}}, }, - testutil.TC(): { + { `count_over_time(pod{}[3m]) == 21`, "", &parser.BinaryExpr{ @@ -426,7 +426,7 @@ func TestParser(t *testing.T) { }, // ######## ERROR - testutil.TC(): { + { `pod{namespace="namespace01"} "`, "1:30: parse error: unterminated quoted string", &parser.VectorSelector{ @@ -437,7 +437,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 0, End: 28}, }, }, - testutil.TC(): { + { `pod{namespace="namespace01"} hello`, "1:30: parse error: unexpected identifier \"hello\"", &parser.VectorSelector{ @@ -448,7 +448,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 0, End: 28}, }, }, - testutil.TC(): { + { 
`pod{namespace="namespace01"} "hello`, "1:30: parse error: unterminated quoted string", &parser.VectorSelector{ @@ -459,7 +459,7 @@ func TestParser(t *testing.T) { PosRange: parser.PositionRange{Start: 0, End: 28}, }, }, - testutil.TC(): { + { `pod{namespace="namespace01"} "hello"`, "1:30: parse error: unexpected string \"\\\"hello\\\"\"", &parser.VectorSelector{ @@ -471,8 +471,8 @@ func TestParser(t *testing.T) { }, }, } - for name, tc := range testCases { - t.Run(name+" "+tc.input, func(t *testing.T) { + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { expr, err := parser.ParseExpr(tc.input) if tc.wantError == "" { assert.NoError(t, err) diff --git a/letheql/prom_parse_tset.go b/letheql/prom_parse_tset.go deleted file mode 100644 index bb1aa19..0000000 --- a/letheql/prom_parse_tset.go +++ /dev/null @@ -1,63 +0,0 @@ -package letheql - -import ( - "fmt" - "testing" - - "github.com/kuoss/lethe/logs/logStore" - "github.com/prometheus/prometheus/promql/parser" - "github.com/stretchr/testify/assert" -) - -func TestPromqlParse(t *testing.T) { - - tests := map[string]struct { - query string - want QueryData - }{ - "01": {query: `pod{namespace="namespace01"}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: 
"namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "02": {query: `pod{namespace="not-exists"}`, - want: QueryData{ResultType: ValueTypeLogs, Logs: []logStore.LogLine{}}}, - "03": {query: `pod{namespace="namespace01",pod="nginx"}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{}, Scalar: 0}}, - "04": {query: `pod{namespace="namespace01",pod="nginx-*"}`, - want: QueryData{ResultType: "logs", Logs: 
[]logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "05": {query: `pod{namespace="namespace01",container="nginx"}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello 
world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "06": {query: `pod{namespace="namespace*",container="nginx"}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "07": {query: `pod{namespace="namespace01",pod="nginx-*"}[3m]`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: 
"namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "08": {query: `pod`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", 
Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: 
"apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "09": {query: `pod{}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: 
"", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, 
logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "10": {query: `count_over_time(pod{namespace="namespace01",pod="nginx-*"}[3m])`, - want: QueryData{ResultType: "scalar", Logs: nil, Scalar: 5}}, - "11": {query: `1`, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 1}}, - "12": {query: `count_over_time(pod{}[3m])`, - want: QueryData{ResultType: "scalar", Logs: nil, 
Scalar: 20}}, - "13": {query: `count_over_time(pod{}[3m]) > 10`, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 1}}, - "14": {query: `count_over_time(pod{}[3m]) < 10`, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 0}}, - "15": {query: `count_over_time(pod{}[3m]) == 21`, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 0}}, - } - for name, tt := range tests { - t.Run(name+" "+tt.query, func(subt *testing.T) { - expr, err := parser.ParseExpr(tt.query) - if err != nil { - return - } - fmt.Printf("%#v", expr) - got, err := ProcQuery(tt.query, TimeRange{}) - if err != nil { - subt.Fatalf("query: %s error: %s", name, tt.query) - } - assert.Equal(subt, tt.want, got) - }) - } -} diff --git a/letheql/query.go b/letheql/query.go index 65cc644..86ff2a1 100644 --- a/letheql/query.go +++ b/letheql/query.go @@ -1,393 +1,45 @@ package letheql import ( - "errors" - "fmt" - "log" - "reflect" - "time" + "context" - "github.com/kuoss/lethe/logs/filter" - "github.com/kuoss/lethe/logs/logStore" - - "github.com/VictoriaMetrics/metricsql" - "github.com/kuoss/lethe/clock" -) - -type LeafType string - -const ( - LeafTypeAuditLogRequest LeafType = "AuditLogRequest" - LeafTypeEventLogRequest LeafType = "EventLogRequest" - LeafTypeNodeLogRequest LeafType = "NodeLogRequest" - LeafTypePodLogRequest LeafType = "PodLogRequest" -) - -const ( - LeafTypeScalarResult LeafType = "ScalarResult" - LeafTypeLogsResult LeafType = "LogsResult" + "github.com/kuoss/lethe/letheql/parser" + "github.com/prometheus/prometheus/storage" ) -type Leaf struct { - LeafType LeafType - LogRequest LogRequest - ScalarResult float64 - LogsResult []logStore.LogLine - Function string - TimeRange TimeRange - Keyword string -} - -type LogRequest struct { - // AuditSearchParams logs.AuditSearchParams - EventSearchParams logStore.EventSearchParams - NodeSearchParams logStore.NodeSearchParams - PodSearchParams logStore.PodSearchParams - DurationSeconds int - Function string 
-} - -type TimeRange struct { - Start time.Time - End time.Time -} - type Query interface { - Exec() []string + Cancel() + Close() + Exec(ctx context.Context) *Result + Statement() parser.Statement + String() string } type query struct { - q string - filter filter.Filter - keyword string - engine *Engine -} - -func (q *query) String() string { - return q.q + q string + queryable storage.Queryable + stmt parser.Statement + cancel func() + ng *Engine } -func (q *query) Exec() []string { - - // q.engine.exec() - // to do - return nil -} - -func ProcQuery(queryString string, timeRange TimeRange) (QueryData, error) { - - // log.Printf("ProcQuery: queryString=%s, timeRange=%s\n", queryString, timeRange) - engine := &Engine{} - query, err := engine.newQuery(queryString) - if err != nil { - return QueryData{}, nil +func (q *query) Cancel() { + if q.cancel != nil { + q.cancel() } - - expr, err := metricsql.Parse(query.q) - if err != nil { - log.Printf("parse query failed. err: %v\n", err) - return QueryData{}, err - } - - leaf, err := resolveExpr(expr, Leaf{TimeRange: timeRange, Keyword: query.keyword}) - if err != nil { - log.Printf("resolve Expr failed. err: %v\n", err) - return QueryData{}, err - } - - leaf, err = resolveLeaf(leaf, query.filter) - if err != nil { - log.Printf("resolve leaf failed. 
err: %v\n", err) - return QueryData{}, err - } - - queryData, err := getQueryDataFromLeaf(leaf) - - if err != nil { - // log.Println("ProcQuery: getQueryDataFromLeaf: err=", err) - return QueryData{}, err - } - return queryData, nil -} - -func getQueryDataFromLeaf(leaf Leaf) (QueryData, error) { - var queryData QueryData - switch leaf.LeafType { - case LeafTypeLogsResult: - queryData.ResultType = ValueTypeLogs - queryData.Logs = leaf.LogsResult - case LeafTypeScalarResult: - queryData.ResultType = ValueTypeScalar - queryData.Scalar = leaf.ScalarResult - default: - return queryData, errors.New("log request not resolved") - } - return queryData, nil } -func resolveLeaf(leaf Leaf, filter filter.Filter) (Leaf, error) { - - if leaf.LeafType != LeafTypeAuditLogRequest && - leaf.LeafType != LeafTypeEventLogRequest && - leaf.LeafType != LeafTypeNodeLogRequest && - leaf.LeafType != LeafTypePodLogRequest { - return leaf, nil - } - req := leaf.LogRequest - // DurationSeconds, TimeRange{} => DurationSeconds, EndTime - now := clock.GetNow() - if leaf.TimeRange.End.IsZero() { - leaf.TimeRange.End = now - } - if leaf.TimeRange.Start.IsZero() { - leaf.TimeRange.Start = leaf.TimeRange.End.Add(time.Duration(-40*24) * time.Hour) - } - if leaf.TimeRange.End == leaf.TimeRange.Start { - return Leaf{}, errors.New("end time and start time are the same") - } - if leaf.TimeRange.End.Before(leaf.TimeRange.Start) { - return Leaf{}, errors.New("end time is earlier than start time") - } - durationSecondsFromTimeRange := int(leaf.TimeRange.End.Sub(leaf.TimeRange.Start) / 1000 / 1000 / 1000) - if durationSecondsFromTimeRange > 40*86400 { - durationSecondsFromTimeRange = 40 * 86400 - } - durationSeconds := req.DurationSeconds - // fmt.Println("leaf.TimeRange.Start=", leaf.TimeRange.Start, "leaf.TimeRange.End=", leaf.TimeRange.End) - // fmt.Println("durationSeconds=", durationSeconds, "durationSecondsFromTimeRange=", durationSecondsFromTimeRange) - if durationSeconds == 0 || 
(durationSecondsFromTimeRange != 0 && durationSecondsFromTimeRange < durationSeconds) { - durationSeconds = durationSecondsFromTimeRange - } - logSearch := logStore.LogSearch{ - DurationSeconds: durationSeconds, - EndTime: leaf.TimeRange.End, - Keyword: leaf.Keyword, - Filter: filter, - } - switch leaf.LeafType { +func (q *query) Close() {} - case LeafTypeNodeLogRequest: - logSearch.LogType = logStore.NodeLog{Name: logStore.NODE_TYPE} - logSearch.TargetPattern = req.NodeSearchParams.Node - logSearch.NodeSearchParams = req.NodeSearchParams - - case LeafTypePodLogRequest: - logSearch.LogType = logStore.PodLog{Name: logStore.POD_TYPE} - logSearch.TargetPattern = req.PodSearchParams.Namespace - logSearch.PodSearchParams = req.PodSearchParams - } - - ls := logStore.New() - switch req.Function { - case "": - result, err := ls.GetLogs(logSearch) - if err != nil { - return Leaf{}, err - } - leaf.LogsResult = result.Logs - leaf.LeafType = LeafTypeLogsResult - case "count_over_time": - logSearch.IsCounting = true - result, err := ls.GetLogs(logSearch) - if err != nil { - return Leaf{}, err - } - leaf.ScalarResult = float64(result.Count) - leaf.LeafType = LeafTypeScalarResult - default: - return leaf, fmt.Errorf("not supported function: %s", req.Function) - } - return leaf, nil -} - -func resolveExpr(expr metricsql.Expr, leaf Leaf) (Leaf, error) { - // fmt.Printf("resolveExpr: %#T %#v\n", expr, leaf.TimeRange) - switch v := expr.(type) { - case *metricsql.BinaryOpExpr: - return procBinaryOpExpr(v, leaf) - case *metricsql.FuncExpr: - return procFuncExpr(v, leaf) - case *metricsql.MetricExpr: - return procMetricExpr(v, leaf) - case *metricsql.NumberExpr: - return procNumberExpr(v, leaf) - case *metricsql.RollupExpr: - return procRollupExpr(v, leaf) - } - return leaf, nil -} - -func procFuncExpr(expr *metricsql.FuncExpr, leaf Leaf) (Leaf, error) { - leaf.Function = expr.Name - newLeaf := Leaf{} - for _, arg := range expr.Args { - var err error - newLeaf, err = resolveExpr(arg, 
leaf) - if err != nil { - return Leaf{}, err - } - // fmt.Printf("procFuncExpr: newLeaf=%#v\n", newLeaf) - } - return newLeaf, nil -} - -func procBinaryOpExpr(expr *metricsql.BinaryOpExpr, leaf Leaf) (Leaf, error) { - // TODO: should be vector not scalar - var leftLeaf, rightLeaf Leaf - var err error - leftLeaf, err = resolveExpr(expr.Left, leaf) - if err != nil { - return Leaf{}, err - } - rightLeaf, err = resolveExpr(expr.Right, leaf) - if err != nil { - return Leaf{}, err - } - //todo filter parameter ? - leftLeaf, err = resolveLeaf(leftLeaf, filter.TempExportFilter{}) - if err != nil { - return Leaf{}, err - } - //todo filter parameter ? - rightLeaf, err = resolveLeaf(rightLeaf, filter.TempExportFilter{}) - if err != nil { - return Leaf{}, err - } - if leftLeaf.LeafType != LeafTypeScalarResult || rightLeaf.LeafType != LeafTypeScalarResult { - return Leaf{}, errors.New("not allowed leafType for operator") - } - leaf.LeafType = LeafTypeScalarResult - leaf.ScalarResult = 0 - switch expr.Op { - case ">": - if leftLeaf.ScalarResult > rightLeaf.ScalarResult { - leaf.ScalarResult = 1 - } - case "<": - if leftLeaf.ScalarResult < rightLeaf.ScalarResult { - leaf.ScalarResult = 1 - } - case "==": - if leftLeaf.ScalarResult == rightLeaf.ScalarResult { - leaf.ScalarResult = 1 - } - case "!=": - if leftLeaf.ScalarResult != rightLeaf.ScalarResult { - leaf.ScalarResult = 1 - } - default: - return leaf, fmt.Errorf("not supported operator: %s", expr.Op) - } - return leaf, nil -} - -func procMetricExpr(expr *metricsql.MetricExpr, leaf Leaf) (Leaf, error) { - if len(expr.LabelFilters) < 1 { - return Leaf{}, errors.New("must have one or more labels") - } - // fmt.Printf("expr=[%#v] LabelFilters=[%#v]\n", expr.LabelFilters, expr) - if expr.LabelFilters[0].Label != "__name__" { - return Leaf{}, errors.New("a log name must be specified") - } - switch expr.LabelFilters[0].Value { - case "audit": - return procAuditExpr(expr, leaf) - case "event": - return procEventExpr(expr, leaf) - 
case "node": - return procNodeExpr(expr, leaf) - case "pod": - return procPodExpr(expr, leaf) - } - return Leaf{}, errors.New("unknown log name") -} - -func procAuditExpr(expr *metricsql.MetricExpr, leaf Leaf) (Leaf, error) { - leaf.LeafType = LeafTypeAuditLogRequest - leaf.LogRequest = LogRequest{} - return leaf, nil -} - -func procEventExpr(expr *metricsql.MetricExpr, leaf Leaf) (Leaf, error) { - var namespace, typ, reason, object, count string - for _, l := range expr.LabelFilters { - switch l.Label { - case "namespace": - namespace = l.Value - case "type": - typ = l.Value - case "reason": - reason = l.Value - case "object": - object = l.Value - case "count": - count = l.Value - } - } - leaf.LeafType = LeafTypeEventLogRequest - leaf.LogRequest = LogRequest{EventSearchParams: logStore.EventSearchParams{Namespace: namespace, Type: typ, Reason: reason, Object: object, Count: count}, Function: leaf.Function} - return leaf, nil -} - -func procNodeExpr(expr *metricsql.MetricExpr, leaf Leaf) (Leaf, error) { - var node, process string - for _, l := range expr.LabelFilters { - switch l.Label { - case "node": - node = l.Value - case "process": - process = l.Value - } - } - leaf.LeafType = LeafTypeNodeLogRequest - leaf.LogRequest = LogRequest{NodeSearchParams: logStore.NodeSearchParams{Node: logStore.PatternedString(node), Process: logStore.PatternedString(process)}, Function: leaf.Function} - return leaf, nil +func (q *query) Exec(ctx context.Context) *Result { + res, warnings, err := q.ng.exec(ctx, q) + return &Result{Err: err, Value: res, Warnings: warnings} } -func procPodExpr(expr *metricsql.MetricExpr, leaf Leaf) (Leaf, error) { - var namespace, pod, container string - for _, l := range expr.LabelFilters { - switch l.Label { - case "namespace": - // explicitly empty (label exists) => error - if l.Value == "" { - return Leaf{}, errors.New("namespace value cannot be empty") - } - namespace = l.Value - case "pod": - pod = l.Value - case "container": - container = 
l.Value - case "__name__": - default: - return Leaf{}, errors.New("unknown label " + l.Label) - } - } - // implicit empty (label not exists) => all - if namespace == "" { - namespace = "*" - } - leaf.LeafType = LeafTypePodLogRequest - leaf.LogRequest = LogRequest{PodSearchParams: logStore.PodSearchParams{Namespace: logStore.PatternedString(namespace), Pod: logStore.PatternedString(pod), Container: logStore.PatternedString(container)}, Function: leaf.Function} - return leaf, nil - -} -func procNumberExpr(expr *metricsql.NumberExpr, leaf Leaf) (Leaf, error) { - leaf.LeafType = LeafTypeScalarResult - leaf.ScalarResult = expr.N - return leaf, nil +func (q *query) Statement() parser.Statement { + return q.stmt } -func procRollupExpr(expr *metricsql.RollupExpr, leaf Leaf) (Leaf, error) { - if reflect.ValueOf(expr.Window).Type().String() != "*metricsql.DurationExpr" { - return Leaf{}, errors.New("not duration expr") - } - leaf, err := resolveExpr(expr.Expr, leaf) - if err != nil { - return Leaf{}, err - } - if leaf.LeafType == LeafTypePodLogRequest || leaf.LeafType == LeafTypeNodeLogRequest || leaf.LeafType == LeafTypeEventLogRequest { - leaf.LogRequest.DurationSeconds = int(expr.Window.Duration(0) / 1000) - } - return leaf, nil +func (q *query) String() string { + return q.q } diff --git a/letheql/query_test.go b/letheql/query_test.go deleted file mode 100644 index b970ceb..0000000 --- a/letheql/query_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package letheql - -import ( - "testing" - "time" - - "github.com/kuoss/lethe/logs/logStore" - - "github.com/stretchr/testify/assert" - - "github.com/kuoss/lethe/clock" - _ "github.com/kuoss/lethe/storage/driver/filesystem" - "github.com/kuoss/lethe/testutil" -) - -func init() { - testutil.Init() - testutil.SetTestLogFiles() -} - -func Test_Query_Success(t *testing.T) { - - tests := map[string]struct { - query string - want QueryData - }{ - "01": {query: `pod{namespace="namespace01"}`, - want: QueryData{ResultType: "logs", Logs: 
[]logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " 
hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "02": {query: `pod{namespace="not-exists"}`, - want: QueryData{ResultType: ValueTypeLogs, Logs: []logStore.LogLine{}}}, - "03": {query: `pod{namespace="namespace01",pod="nginx"}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{}, Scalar: 0}}, - "04": {query: `pod{namespace="namespace01",pod="nginx-*"}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: 
"namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "05": {query: `pod{namespace="namespace01",container="nginx"}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "06": {query: `pod{namespace="namespace*",container="nginx"}`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", 
Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "07": {query: `pod{namespace="namespace01",pod="nginx-*"}[3m]`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "08": {query: `pod`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: 
"namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", 
Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "09": {query: `pod{}`, - want: 
QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: 
"nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: 
"namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "10": {query: `count_over_time(pod{namespace="namespace01",pod="nginx-*"}[3m])`, - want: QueryData{ResultType: "scalar", Logs: nil, Scalar: 5}}, - "11": {query: `1`, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 1}}, - "12": {query: `count_over_time(pod{}[3m])`, - want: QueryData{ResultType: "scalar", Logs: nil, Scalar: 20}}, - "13": {query: `count_over_time(pod{}[3m]) > 10`, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 1}}, - "14": {query: `count_over_time(pod{}[3m]) < 10`, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 0}}, - "15": {query: `count_over_time(pod{}[3m]) == 21`, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 0}}, - - // todo - // scalar operator same with filter ( != ) - // "operator != count_over_time_with_duration": {query: `count_over_time(pod{}[3m]) != 21`, want: QueryData{ - // ResultType: ValueTypeScalar, - // Logs: nil, - // Scalar: 0, - // }}, - - "16": {query: `pod{namespace="namespace01"} |= hello`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " 
hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - "17": {query: `pod{namespace="namespace01"} != hello`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}}}}, - "18": {query: 
`pod{namespace="namespace01"} |~ (.ro.*o)`, - want: QueryData{ResultType: ValueTypeLogs, Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}}}}, - "19": {query: `pod{namespace="namespace01"} !~ (.le)`, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: 
"nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}, - } - - for name, tt := range tests { - t.Run(name+" "+tt.query, func(subt *testing.T) { - got, err := ProcQuery(tt.query, TimeRange{}) - if err != nil { - subt.Fatalf("query: %s error: %s", name, tt.query) - } - assert.Equal(subt, tt.want, got) - }) - } -} - -func ago(m int) time.Time { - return clock.GetNow().Add(time.Duration(-m) * time.Minute) -} -func Test_QueryWithTimeRange(t *testing.T) { - - now := clock.GetNow() - - tests := map[string]struct { - query string - timeRange TimeRange - want QueryData - }{ - // modify test case.. for supoorting sort by time - "01": { - query: `pod{namespace="namespace01",pod="nginx-*"}[3m]`, timeRange: TimeRange{}, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " 
hello world"}}}, - }, - "02": { - query: `pod{namespace="namespace01",pod="nginx-*"}`, timeRange: TimeRange{Start: ago(999999), End: now}, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}, - }, - "03": { - query: `pod{namespace="namespace01",pod="nginx-*"}`, timeRange: TimeRange{Start: ago(1), End: now}, - want: QueryData{ResultType: "logs", Logs: []logStore.LogLine{logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: 
"nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}, - }, - "04": {query: `count_over_time(pod{namespace="*"})`, timeRange: TimeRange{Start: ago(1), End: now}, - want: QueryData{ResultType: "scalar", Logs: nil, Scalar: 2}}, - "05": {query: `count_over_time(pod{namespace="*"})`, timeRange: TimeRange{Start: ago(2), End: now}, - want: QueryData{ResultType: "scalar", Logs: nil, Scalar: 17}}, - "06": {query: `count_over_time(pod{namespace="*"})`, timeRange: TimeRange{Start: ago(3), End: now}, - want: QueryData{ResultType: ValueTypeScalar, Logs: nil, Scalar: 20}}, - } - - for name, tt := range tests { - t.Run(name+"_"+tt.query, func(subt *testing.T) { - got, err := ProcQuery(tt.query, tt.timeRange) - if err != nil { - subt.Fatalf("query: %s error: %s", name, tt.query) - } - assert.Equal(subt, tt.want, got) - }) - } -} - -func Test_QueryFail(t *testing.T) { - - var now = clock.GetNow() - tests := map[string]struct { - query string - timeRange TimeRange - want string - }{ - // modify test case.. 
for supoorting sort by time - "01": {query: `pod{namespace=""}`, timeRange: TimeRange{}, - want: "namespace value cannot be empty"}, - "02": {query: `pod{foo=""}`, timeRange: TimeRange{}, - want: "unknown label foo"}, - "03": {query: `{namespace="hello"}`, timeRange: TimeRange{}, - want: "a log name must be specified"}, - "04": {query: `count_over_time(pod{namespace="*"})`, timeRange: TimeRange{Start: ago(0), End: now}, - want: "end time and start time are the same"}, - } - - for name, tt := range tests { - t.Run(name+"_"+tt.query, func(subt *testing.T) { - _, err := ProcQuery(tt.query, tt.timeRange) - if assert.Error(subt, err) { - assert.Equal(subt, tt.want, err.Error()) - } - }) - } -} - -func TestNewQuery(t *testing.T) { - - e := &Engine{} - - tests := map[string]struct { - query string - want *query - }{ - `pod metric with namespace label matcher`: { - query: `pod{namespace="namespace01"}`, - want: &query{ - q: `pod{namespace="namespace01"}`, - filter: nil, - keyword: "", - engine: e, - }, - }, - } - - for name, tt := range tests { - t.Run(name+"_"+tt.query, func(subt *testing.T) { - query, err := e.newQuery(tt.query) - if err != nil { - subt.Fatalf("%s test failed. 
build new Query with err: %v", tt.query, err) - } - assert.Equal(subt, query, tt.want) - }) - } -} diff --git a/letheql/types.go b/letheql/types.go deleted file mode 100644 index a6af04b..0000000 --- a/letheql/types.go +++ /dev/null @@ -1,27 +0,0 @@ -package letheql - -import "github.com/kuoss/lethe/logs/logStore" - -type ParsedQuery struct { - Type string - Labels []Label - Keyword string -} - -type Label struct { - Key string - Value string -} - -type ValueType string - -const ( - ValueTypeScalar ValueType = "scalar" - ValueTypeLogs ValueType = "logs" -) - -type QueryData struct { - ResultType ValueType `json:"resultType"` - Logs []logStore.LogLine `json:"logs,omitempty"` - Scalar float64 `json:"scalar,omitempty"` -} diff --git a/letheql/value.go b/letheql/value.go new file mode 100644 index 0000000..92462c2 --- /dev/null +++ b/letheql/value.go @@ -0,0 +1,356 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// package promql +package letheql + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/letheql/parser" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" +) + +func (Matrix) Type() parser.ValueType { return parser.ValueTypeMatrix } +func (Vector) Type() parser.ValueType { return parser.ValueTypeVector } +func (Scalar) Type() parser.ValueType { return parser.ValueTypeScalar } +func (String) Type() parser.ValueType { return parser.ValueTypeString } + +// String represents a string value. +type String struct { + T int64 + V string +} + +func (s String) String() string { + return s.V +} + +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V}) +} + +// Scalar is a data point that's explicitly not associated with a metric. +type Scalar struct { + T int64 + V float64 +} + +func (s Scalar) String() string { + v := strconv.FormatFloat(s.V, 'f', -1, 64) + return fmt.Sprintf("scalar: %v @[%v]", v, s.T) +} + +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(s.V, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(s.T) / 1000, v}) +} + +// Series is a stream of data points belonging to a metric. +type Series struct { + Metric labels.Labels `json:"metric"` + Floats []FPoint `json:"values,omitempty"` + Histograms []HPoint `json:"histograms,omitempty"` +} + +func (s Series) String() string { + // TODO(beorn7): This currently renders floats first and then + // histograms, each sorted by timestamp. Maybe, in mixed series, that's + // fine. Maybe, however, primary sorting by timestamp is preferred, in + // which case this has to be changed. 
+ vals := make([]string, 0, len(s.Floats)+len(s.Histograms)) + for _, f := range s.Floats { + vals = append(vals, f.String()) + } + for _, h := range s.Histograms { + vals = append(vals, h.String()) + } + return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n")) +} + +// FPoint represents a single float data point for a given timestamp. +type FPoint struct { + T int64 + F float64 +} + +func (p FPoint) String() string { + s := strconv.FormatFloat(p.F, 'f', -1, 64) + return fmt.Sprintf("%s @[%v]", s, p.T) +} + +// MarshalJSON implements json.Marshaler. +// +// JSON marshaling is only needed for the HTTP API. Since FPoint is such a +// frequently marshaled type, it gets an optimized treatment directly in +// web/api/v1/api.go. Therefore, this method is unused within Prometheus. It is +// still provided here as convenience for debugging and for other users of this +// code. Also note that the different marshaling implementations might lead to +// slightly different results in terms of formatting and rounding of the +// timestamp. +func (p FPoint) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(p.F, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) +} + +// HPoint represents a single histogram data point for a given timestamp. +// H must never be nil. +type HPoint struct { + T int64 + H *histogram.FloatHistogram +} + +func (p HPoint) String() string { + return fmt.Sprintf("%s @[%v]", p.H.String(), p.T) +} + +// MarshalJSON implements json.Marshaler. +// +// JSON marshaling is only needed for the HTTP API. Since HPoint is such a +// frequently marshaled type, it gets an optimized treatment directly in +// web/api/v1/api.go. Therefore, this method is unused within Prometheus. It is +// still provided here as convenience for debugging and for other users of this +// code. 
Also note that the different marshaling implementations might lead to +// slightly different results in terms of formatting and rounding of the +// timestamp. +func (p HPoint) MarshalJSON() ([]byte, error) { + h := struct { + Count string `json:"count"` + Sum string `json:"sum"` + Buckets [][]interface{} `json:"buckets,omitempty"` + }{ + Count: strconv.FormatFloat(p.H.Count, 'f', -1, 64), + Sum: strconv.FormatFloat(p.H.Sum, 'f', -1, 64), + } + it := p.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + boundaries := 2 // Exclusive on both sides AKA open interval. + if bucket.LowerInclusive { + if bucket.UpperInclusive { + boundaries = 3 // Inclusive on both sides AKA closed interval. + } else { + boundaries = 1 // Inclusive only on lower end AKA right open. + } + } else { + if bucket.UpperInclusive { + boundaries = 0 // Inclusive only on upper end AKA left open. + } + } + bucketToMarshal := []interface{}{ + boundaries, + strconv.FormatFloat(bucket.Lower, 'f', -1, 64), + strconv.FormatFloat(bucket.Upper, 'f', -1, 64), + strconv.FormatFloat(bucket.Count, 'f', -1, 64), + } + h.Buckets = append(h.Buckets, bucketToMarshal) + } + return json.Marshal([...]interface{}{float64(p.T) / 1000, h}) +} + +// Sample is a single sample belonging to a metric. It represents either a float +// sample or a histogram sample. If H is nil, it is a float sample. Otherwise, +// it is a histogram sample. +type Sample struct { + T int64 + F float64 + H *histogram.FloatHistogram + + Metric labels.Labels +} + +func (s Sample) String() string { + var str string + if s.H == nil { + p := FPoint{T: s.T, F: s.F} + str = p.String() + } else { + p := HPoint{T: s.T, H: s.H} + str = p.String() + } + return fmt.Sprintf("%s => %s", s.Metric, str) +} + +// MarshalJSON is mirrored in web/api/v1/api.go with jsoniter because FPoint and +// HPoint wouldn't be marshaled with jsoniter otherwise. 
+func (s Sample) MarshalJSON() ([]byte, error) { + if s.H == nil { + f := struct { + M labels.Labels `json:"metric"` + F FPoint `json:"value"` + }{ + M: s.Metric, + F: FPoint{T: s.T, F: s.F}, + } + return json.Marshal(f) + } + h := struct { + M labels.Labels `json:"metric"` + H HPoint `json:"histogram"` + }{ + M: s.Metric, + H: HPoint{T: s.T, H: s.H}, + } + return json.Marshal(h) +} + +// Vector is basically only an an alias for []Sample, but the contract is that +// in a Vector, all Samples have the same timestamp. +type Vector []Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +// ContainsSameLabelset checks if a vector has samples with the same labelset +// Such a behavior is semantically undefined +// https://github.com/prometheus/prometheus/issues/4562 +func (vec Vector) ContainsSameLabelset() bool { + switch len(vec) { + case 0, 1: + return false + case 2: + return vec[0].Metric.Hash() == vec[1].Metric.Hash() + default: + l := make(map[uint64]struct{}, len(vec)) + for _, ss := range vec { + hash := ss.Metric.Hash() + if _, ok := l[hash]; ok { + return true + } + l[hash] = struct{}{} + } + return false + } +} + +// Matrix is a slice of Series that implements sort.Interface and +// has a String method. +type Matrix []Series + +func (m Matrix) String() string { + // TODO(fabxc): sort, or can we rely on order from the querier? + strs := make([]string, len(m)) + + for i, ss := range m { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} + +// TotalSamples returns the total number of samples in the series within a matrix. 
+func (m Matrix) TotalSamples() int { + numSamples := 0 + for _, series := range m { + numSamples += len(series.Floats) + len(series.Histograms) + } + return numSamples +} + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return labels.Compare(m[i].Metric, m[j].Metric) < 0 } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +// ContainsSameLabelset checks if a matrix has samples with the same labelset. +// Such a behavior is semantically undefined. +// https://github.com/prometheus/prometheus/issues/4562 +func (m Matrix) ContainsSameLabelset() bool { + switch len(m) { + case 0, 1: + return false + case 2: + return m[0].Metric.Hash() == m[1].Metric.Hash() + default: + l := make(map[uint64]struct{}, len(m)) + for _, ss := range m { + hash := ss.Metric.Hash() + if _, ok := l[hash]; ok { + return true + } + l[hash] = struct{}{} + } + return false + } +} + +// Result holds the resulting value of an execution or an error +// if any occurred. +type Result struct { + Err error + Value parser.Value + // Warnings storage.Warnings + Warnings model.Warnings +} + +// Vector returns a Vector if the result value is one. An error is returned if +// the result was an error or the result value is not a Vector. +func (r *Result) Vector() (Vector, error) { + if r.Err != nil { + return nil, r.Err + } + v, ok := r.Value.(Vector) + if !ok { + return nil, errors.New("query result is not a Vector") + } + return v, nil +} + +// Matrix returns a Matrix. An error is returned if +// the result was an error or the result value is not a Matrix. +func (r *Result) Matrix() (Matrix, error) { + if r.Err != nil { + return nil, r.Err + } + v, ok := r.Value.(Matrix) + if !ok { + return nil, errors.New("query result is not a range Vector") + } + return v, nil +} + +// Scalar returns a Scalar value. An error is returned if +// the result was an error or the result value is not a Scalar. 
+func (r *Result) Scalar() (Scalar, error) { + if r.Err != nil { + return Scalar{}, r.Err + } + v, ok := r.Value.(Scalar) + if !ok { + return Scalar{}, errors.New("query result is not a Scalar") + } + return v, nil +} + +func (r *Result) String() string { + if r.Err != nil { + return r.Err.Error() + } + if r.Value == nil { + return "" + } + return r.Value.String() +} diff --git a/logs/filter/filter.go b/logs/filter/filter.go deleted file mode 100644 index 57539a1..0000000 --- a/logs/filter/filter.go +++ /dev/null @@ -1,129 +0,0 @@ -package filter - -import ( - "errors" - "regexp" - "strings" -) - -const ( - include = "|=" - exclude = "!=" - includeRegex = "|~" - excludeRegex = "!~" -) - -func IsFilterExist(query string) (ok bool, filter string, err error) { - // move to heap Filterlist - filterList := []string{include, exclude, includeRegex, excludeRegex} - var filtersInQuery []string - for _, v := range filterList { - if strings.Contains(query, v) { - filtersInQuery = append(filtersInQuery, v) - } - } - switch len(filtersInQuery) { - case 0: - return false, "", nil - case 1: - return true, filtersInQuery[0], nil - } - return false, "", errors.New("filter must be only one or no filters") -} - -func FromQuery(query string) (Filter, error) { - _, filterType, _ := IsFilterExist(query) - parts := strings.Split(query, filterType) - switch filterType { - case include: - return &includeFilter{keyword: strings.TrimSpace(parts[1])}, nil - case exclude: - return &excludeFilter{keyword: strings.TrimSpace(parts[1])}, nil - case includeRegex: - f := &includeRegexFilter{keyword: strings.TrimSpace(parts[1])} - if f.isRegexFilter() { - return f, nil - } - return &includeRegexFilter{}, errors.New("wrong regex expression") - case excludeRegex: - f := &excludeRegexFilter{keyword: strings.TrimSpace(parts[1])} - if f.isRegexFilter() { - return f, nil - } - return &excludeRegexFilter{}, errors.New("wrong regex expression") - } - return nil, errors.New("there is wrong filter in query") 
-} - -type Filter interface { - Match(string) bool -} - -type RegexFilter interface { - isRegex() bool -} - -// for just test build -type TempExportFilter struct{} - -func (f TempExportFilter) Match(line string) bool { - return true -} - -type includeFilter struct { - keyword string -} - -func (f *includeFilter) Match(line string) bool { - return strings.Contains(line, f.keyword) -} - -type excludeFilter struct { - keyword string -} - -func (f *excludeFilter) Match(line string) bool { - return !strings.Contains(line, f.keyword) -} - -type includeRegexFilter struct { - regex *regexp.Regexp - keyword string -} - -func (f *includeRegexFilter) Match(line string) bool { - return f.regex.MatchString(line) -} - -func (f *includeRegexFilter) isRegexFilter() bool { - compile, err := regexp.Compile(f.keyword) - f.regex = compile - if err != nil { - return false - } - if err != nil { - return false - } - return true -} - -type excludeRegexFilter struct { - regex *regexp.Regexp - keyword string -} - -func (f *excludeRegexFilter) Match(line string) bool { - return !f.regex.MatchString(line) -} - -func (f *excludeRegexFilter) isRegexFilter() bool { - compile, err := regexp.Compile(f.keyword) - f.regex = compile - if err != nil { - return false - } - if err != nil { - return false - } - return true -} diff --git a/logs/filter/filter_test.go b/logs/filter/filter_test.go deleted file mode 100644 index e698db0..0000000 --- a/logs/filter/filter_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package filter - -import ( - "regexp" - "testing" - - "github.com/stretchr/testify/assert" -) - -/* -"with namespace01 and include hello keyword": {query: `pod{namespace="namespace01"} |= hello`, want: `{"resultType":"logs","logs":["2009-11-10T22:56:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:56:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:56:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from 
sidecar","2009-11-10T22:57:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:57:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:57:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:58:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar","2009-11-10T22:58:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar","2009-11-10T22:59:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world","2009-11-10T23:00:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world"]}`}, - "with namespace01 and exclude hello keyword": {query: `pod{namespace="namespace01"} != hello`, want: `{"resultType":"logs","logs":["2009-11-10T22:58:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] lerom from sidecar","2009-11-10T22:59:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] lerom ipsum"]}`}, - "with namespace01 and includeRegex keyword": {query: `pod{namespace="namespace01"} |~ (.ro.*o)`, want: `{"resultType":"logs","logs":["2009-11-10T22:58:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] lerom from sidecar"]}`}, - "with namespace01 and excludeRegex keyword": {query: `pod{namespace="namespace01"} !~ (.le)`, want: `{"resultType":"logs","logs":["2009-11-10T22:56:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:56:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:56:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:57:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:57:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar","2009-11-10T22:57:00.000000Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from 
sidecar","2009-11-10T22:58:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar","2009-11-10T22:58:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar","2009-11-10T22:59:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world","2009-11-10T23:00:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world"]}`}, - -*/ - -func TestGetFilterFromQuery(t *testing.T) { - tests := map[string]struct { - query string - filter Filter - }{ - "include filter": {query: `pod{namespace="namespace01"} |= hello`, filter: &includeFilter{keyword: "hello"}}, - "exclude filter": {query: `pod{namespace="namespace01"} != hello`, filter: &excludeFilter{keyword: "hello"}}, - - "include regex filter": {query: `pod{namespace="namespace01"} |~ (.ro.*o)`, filter: &includeRegexFilter{ - regex: regexp.MustCompile(`(.ro.*o)`), - keyword: `(.ro.*o)`}, - }, - "exclude regex filter": {query: `pod{namespace="namespace01"} !~ (.ro.*o)`, filter: &excludeRegexFilter{ - regex: regexp.MustCompile(`(.ro.*o)`), - keyword: `(.ro.*o)`}, - }, - } - - for name, tt := range tests { - t.Run(name, func(subt *testing.T) { - got, err := FromQuery(tt.query) - if err != nil { - subt.Fatalf("query: %s err: %s", name, err.Error()) - } - - assert.Equal(subt, tt.filter, got) - }) - } -} diff --git a/logs/logStore/logStore.go b/logs/logStore/logStore.go deleted file mode 100644 index cc945a9..0000000 --- a/logs/logStore/logStore.go +++ /dev/null @@ -1,347 +0,0 @@ -package logStore - -import ( - "bufio" - "fmt" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/kuoss/lethe/logs/filter" - - "github.com/kuoss/lethe/clock" - "github.com/kuoss/lethe/storage/driver" - "github.com/kuoss/lethe/storage/driver/factory" - - "github.com/kuoss/lethe/config" -) - -type EventSearchParams struct { - Namespace string - Type string - Reason string - Object string - Count string -} - -type NodeSearchParams struct { - Nodes []string 
- Node PatternedString - Process PatternedString -} - -type PodSearchParams struct { - Namespaces []string - Namespace PatternedString - Pod PatternedString - Container PatternedString -} - -// now Patternable can handle only one '*' and starts with substring -// exampel) nignx-*, namespace*, -type Patternable interface { - Patterned() bool -} -type PatternedString string - -func (ps PatternedString) Patterned() bool { - return strings.Contains(string(ps), "*") -} - -func (ps PatternedString) PatternMatch(s string) bool { - if string(ps) == "" { - return true - } - if ps.Patterned() { - return strings.Contains(s, ps.withoutPattern()) - } - return ps.withoutPattern() == s -} - -// withoutPattern return pattern removed string, -// if it has not pattern return its own string -func (ps PatternedString) withoutPattern() string { - psString := string(ps) - if !ps.Patterned() { - return psString - } - //todo - //nginx-* vs nginx-.* (regex) - pos := strings.IndexRune(psString, '*') - if pos > 0 && psString[pos-1] == '.' 
{ - return psString[0 : pos-1] - } - return psString[0:pos] -} - -type LogSearch struct { - LogType LogLine // audit | event | pod | node - TargetPattern PatternedString // audit | event | | - Targets []string - // AuditSearchParams AuditSearchParams - EventSearchParams EventSearchParams - PodSearchParams PodSearchParams - NodeSearchParams NodeSearchParams - Keyword string - DurationSeconds int - EndTime time.Time - StartTime time.Time - IsCounting bool - Filter filter.Filter -} - -type Result struct { - IsCounting bool - Logs []LogLine - Count int -} - -type LogStore struct { - driver driver.StorageDriver -} - -func New() *LogStore { - d, _ := factory.Get("filesystem", map[string]interface{}{"RootDirectory": config.GetLogDataPath()}) - return &LogStore{driver: d} -} - -func (ls *LogStore) GetLogs(logSearch LogSearch) (Result, error) { - // fmt.Printf("logSearch= %+v", logSearch) - //fmt.Printf("root directory from driver: %s\n", ls.driver.RootDirectory()) - - logTypePath := filepath.Join(ls.driver.RootDirectory(), logSearch.LogType.GetName()) - targets, err := ls.driver.List(logTypePath) - //fmt.Printf("log Type: %s, targets: %v", logTypePath, targets) - - if err != nil { - return Result{IsCounting: false, Logs: nil}, nil - } - - var matchedTarget []string - - if logSearch.TargetPattern.Patterned() { - var patternMatched []string - for _, t := range targets { - //todo - _, candidates := filepath.Split(t) - if strings.Contains(candidates, logSearch.TargetPattern.withoutPattern()) { - patternMatched = append(patternMatched, candidates) - } - } - matchedTarget = patternMatched - } else { - matchedTarget = append(matchedTarget, string(logSearch.TargetPattern)) - } - - rangeParamInit(&logSearch) - - // from here only check matchedTarget - var timeFilteredFiles []string - for _, dir := range matchedTarget { - timeFilteredFiles = append(timeFilteredFiles, timeFilter(filepath.Join(logTypePath, dir), &logSearch, ls.driver)...) 
- } - - logs := logFromTarget(timeFilteredFiles, logSearch, config.GetLimit(), ls.driver) - - sort.SliceStable(*logs, func(i, j int) bool { - l := *logs - x := l[i].getTime() - y := l[j].getTime() - return x < y - }) - - return Result{Logs: *logs, Count: len(*logs)}, nil -} - -func rangeParamInit(search *LogSearch) { - if search.EndTime.IsZero() { - now := clock.GetNow() - search.EndTime = now - } - - if search.DurationSeconds == 0 { - search.StartTime = search.EndTime.Add(time.Duration(-100*24) * time.Hour) // 10 days ago - } else { - search.StartTime = search.EndTime.Add(time.Duration(-search.DurationSeconds) * time.Second) - } -} - -// todo -// limit check here? -func logFromTarget(files []string, search LogSearch, limit int, driver driver.StorageDriver) (logs *[]LogLine) { - - logs = &[]LogLine{} - - sort.Strings(files) - for _, file := range files { - limit -= checkTarget(file, search, logs, driver) - if limit < 0 { - return logs - } - } - return logs -} - -// This function process line by line -// very Dependent on performance -func checkTarget(file string, search LogSearch, logs *[]LogLine, driver driver.StorageDriver) (logSize int) { - switch search.LogType.GetName() { - case "audit": - case "event": - case NODE_TYPE: - - //todo process() - rc, _ := driver.Reader(file) - sc := bufio.NewScanner(rc) - for sc.Scan() { - // 2009-11-10T23:00:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world [ddd12wewe] - // 2009-11-10T22:58:00.000000Z[node01|dockerd] hello from sidecar - line := sc.Text() - //todo error handling - timeFromLog, withoutTime, _ := timeParse(line) - if search.StartTime.After(timeFromLog) || search.EndTime.Before(timeFromLog) { - continue - } - - node, process, bulk, err := parseHierarchyNode(line) - if err != nil { - return 0 - } - - if !search.NodeSearchParams.Node.PatternMatch(node) || !search.NodeSearchParams.Process.PatternMatch(process) { - continue - } - - withoutMeta := strings.TrimPrefix(withoutTime, bulk) - - 
nodeLog := NodeLog{ - Time: timeFromLog.Format(time.RFC3339Nano), - Node: node, - Process: process, - Log: withoutMeta, - } - - //todo filtering here? - if search.Filter != nil { - if search.Filter.Match(line) { - *logs = append(*logs, nodeLog) - } - } else { - *logs = append(*logs, nodeLog) - } - } - return len(*logs) - - case POD_TYPE: - //todo process() - rc, _ := driver.Reader(file) - sc := bufio.NewScanner(rc) - for sc.Scan() { - // 2009-11-10T23:00:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world [ddd12wewe] - line := sc.Text() - //todo error handling - timeFromLog, withoutTime, _ := timeParse(line) - if search.StartTime.After(timeFromLog) || search.EndTime.Before(timeFromLog) { - continue - } - - ns, pod, container, bulk, err := parseHierarchyPod(line) - if err != nil { - return 0 - } - - if !search.PodSearchParams.Namespace.PatternMatch(ns) || !search.PodSearchParams.Pod.PatternMatch(pod) || !search.PodSearchParams.Container.PatternMatch(container) { - continue - } - //todo filtering here? 
- - withoutMeta := strings.TrimPrefix(withoutTime, bulk) - - podLog := PodLog{ - Time: timeFromLog.Format(time.RFC3339Nano), - Namespace: ns, - Pod: pod, - Container: container, - Log: withoutMeta, - } - - if search.Filter != nil { - if search.Filter.Match(line) { - *logs = append(*logs, podLog) - } - } else { - *logs = append(*logs, podLog) - } - } - return len(*logs) - } - return 0 -} - -func timeParse(line string) (time.Time, string, error) { - s := line[0:strings.IndexRune(line, '[')] - parsed, err := time.Parse(time.RFC3339Nano, s) - if err != nil { - return time.Time{}, "", err - } - //parsed, withoutTime,error - return parsed, strings.TrimPrefix(line, s), nil -} - -func timeFilter(directory string, search *LogSearch, driver driver.StorageDriver) (filteredFiles []string) { - - files, err := driver.List(directory) - if err != nil { - return - } - - for _, file := range files { - filename := filepath.Base(file) - if rangeCheckFromFilename(filename, search.StartTime, search.EndTime) { - filteredFiles = append(filteredFiles, file) - } - } - return filteredFiles -} - -func rangeCheckFromFilename(name string, start time.Time, end time.Time) bool { - fileStart, err := time.Parse(time.RFC3339, strings.Replace(name[0:13], "_", "T", 1)+":00:00Z") - if err != nil { - //to do - return false - } - - fileEnd := fileStart.Add(time.Duration(3599) * time.Second) // per hour for one logs - - if start.After(fileEnd) || end.Before(fileStart) { - // out of range - return false - } - // except the whole range return true - return true -} - -// todo -// go to PodLog type private fucntion -func parseHierarchyPod(line string) (namespace, pod, container, bulk string, err error) { - bulk = line[strings.IndexRune(line, '[')+1 : strings.IndexRune(line, ']')] - parts := strings.Split(bulk, "|") - if len(parts) != 3 { - return namespace, pod, container, "[" + bulk + "]", fmt.Errorf("log line not follow [{ns}|{pod}|{container}]. 
: %s", line) - } - namespace, pod, container = parts[0], parts[1], parts[2] - return namespace, pod, container, "[" + bulk + "]", nil -} - -// todo -// go to NodeLog type private fucntion -func parseHierarchyNode(line string) (node, process, bulk string, err error) { - bulk = line[strings.IndexRune(line, '[')+1 : strings.IndexRune(line, ']')] - parts := strings.Split(bulk, "|") - if len(parts) != 2 { - return node, process, "[" + bulk + "]", fmt.Errorf("log line not follow [{node}|{process}]. : %s", line) - } - node, process = parts[0], parts[1] - return node, process, "[" + bulk + "]", nil -} diff --git a/logs/logStore/logStore_test.go b/logs/logStore/logStore_test.go deleted file mode 100644 index 14f91c5..0000000 --- a/logs/logStore/logStore_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package logStore - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -var hello1 PatternedString = "hello" -var hello2 PatternedString = "hello*" -var hello3 PatternedString = "hello.*" - -func Test_Patterned(t *testing.T) { - assert.Equal(t, hello1.Patterned(), false) - assert.Equal(t, hello2.Patterned(), true) - assert.Equal(t, hello3.Patterned(), true) -} - -func Test_PatternMatch(t *testing.T) { - assert.Equal(t, hello1.PatternMatch("hello world"), false) - assert.Equal(t, hello2.PatternMatch("hello world"), true) - assert.Equal(t, hello3.PatternMatch("hello world"), true) -} - -func Test_withoutPattern(t *testing.T) { - assert.Equal(t, hello1.withoutPattern(), "hello") - assert.Equal(t, hello2.withoutPattern(), "hello") - assert.Equal(t, hello3.withoutPattern(), "hello") -} - -func Test_New(t *testing.T) { - var logStore *LogStore = New() - assert.Equal(t, logStore, New()) -} diff --git a/logs/logStore/types.go b/logs/logStore/types.go deleted file mode 100644 index 053a71f..0000000 --- a/logs/logStore/types.go +++ /dev/null @@ -1,67 +0,0 @@ -package logStore - -import "fmt" - -const ( - AUDIT_TYPE = "audit" - EVENT_TYPE = "event" - POD_TYPE = "pod" - NODE_TYPE = 
"node" -) - -type LogLine interface { - GetName() string - process() int - getTime() string - CompactRaw() string -} - -type NodeLog struct { - Name string `json:"-"` - Time string `json:"time,omitempty"` - Node string `json:"node,omitempty"` - Process string `json:"process,omitempty"` - Log string `json:"log,omitempty"` -} - -func (log NodeLog) CompactRaw() string { - return fmt.Sprintf("%s[%s|%s] %s", log.Time, log.Node, log.Process, log.Log) -} - -func (log NodeLog) process() int { - return 0 -} - -func (log NodeLog) GetName() string { - return log.Name -} - -func (log NodeLog) getTime() string { - return log.Time -} - -type PodLog struct { - Name string `json:"-"` - Time string `json:"time"` - Namespace string `json:"namespace"` - Pod string `json:"pod"` - Container string `json:"container"` - Log string `json:"log"` -} - -func (log PodLog) CompactRaw() string { - return fmt.Sprintf("%s[%s|%s|%s] %s", log.Time, log.Namespace, log.Pod, log.Container, log.Log) -} - -func (log PodLog) process() int { - - return 0 -} - -func (log PodLog) GetName() string { - return log.Name -} - -func (log PodLog) getTime() string { - return log.Time -} diff --git a/logs/rotator/cleansing.go b/logs/rotator/cleansing.go deleted file mode 100644 index 636099b..0000000 --- a/logs/rotator/cleansing.go +++ /dev/null @@ -1,34 +0,0 @@ -package rotator - -import ( - "fmt" - "log" - "os" - "path/filepath" - - "github.com/kuoss/lethe/config" -) - -func (rotator *Rotator) Cleansing() { - rotator.cleansingLogFiles("host") - rotator.cleansingLogFiles("kube") -} - -func (rotator *Rotator) cleansingLogFiles(prefix string) { - files, err := filepath.Glob(fmt.Sprintf("%s/%s.*", config.GetLogDataPath(), prefix)) - if err != nil { - fmt.Printf("error on cleansingLogFiles(%s): %s", prefix, err) - return - } - if len(files) < 1 { - return - } - log.Printf("Warning: need cleansing log files(%s).\n", prefix) - for _, file := range files { - log.Printf("deleting logs... 
%s", file) - e := os.Remove(file) - if e != nil { - log.Printf("error on deleting logs... %s", file) - } - } -} diff --git a/logs/rotator/delete.go b/logs/rotator/delete.go deleted file mode 100644 index 36d3bbe..0000000 --- a/logs/rotator/delete.go +++ /dev/null @@ -1,77 +0,0 @@ -package rotator - -import ( - "fmt" - "sort" - "strings" - - "github.com/kuoss/common/logger" - "github.com/kuoss/lethe/clock" - "github.com/kuoss/lethe/config" - "github.com/kuoss/lethe/util" -) - -// DELETE -func (rotator *Rotator) DeleteByAge() error { - retentionTime := config.Viper().GetString("retention.time") - duration, err := util.GetDurationFromAge(retentionTime) - if err != nil { - return fmt.Errorf("error on GetDurationFromAge: %w", err) - } - point := strings.Replace(clock.GetNow().Add(-duration).UTC().String()[0:13], " ", "_", 1) - files, err := rotator.ListFiles() - if err != nil { - return fmt.Errorf("error on ListFiles: %w", err) - } - if len(files) < 1 { - logger.Infof("DeleteByAge( < %s): no files. 
done.", point) - return nil - } - sort.Slice(files, func(i, j int) bool { - return files[i].Name < files[j].Name - }) - - for _, file := range files { - if file.Name < point { - logger.Infof("DeleteByAge(%s < %s): %s", file.Name, point, file.FullPath) - err := rotator.driver.Delete(file.FullPath) - if err != nil { - logger.Errorf("error on Delete: %s", err) - continue - } - } - } - logger.Infof("DeleteByAge(%s): Done\n", point) - return nil -} - -func (rotator *Rotator) DeleteBySize() error { - retentionSizeBytes, err := util.StringToBytes(config.Viper().GetString("retention.size")) - if err != nil { - return fmt.Errorf("error on StringToBytes: %w", err) - } - files, err := rotator.ListFiles() - if err != nil { - return fmt.Errorf("error on ListFiles: %w", err) - } - sort.Slice(files, func(i, j int) bool { - return files[i].Name < files[j].Name - }) - - for _, file := range files { - usedBytes, err := rotator.GetUsedBytes(config.GetLogDataPath()) - if err != nil { - return fmt.Errorf("error on GetUsedBytes: %w", err) - } - if usedBytes < retentionSizeBytes { - logger.Infof("DeleteBySize(%d < %d): DONE", usedBytes, retentionSizeBytes) - return nil - } - logger.Infof("DeleteBySize(%d > %d): %s", usedBytes, retentionSizeBytes, file.FullPath) - err = rotator.driver.Delete(file.FullPath) - if err != nil { - logger.Errorf("error on Delte: %s", err) - } - } - return nil -} diff --git a/logs/rotator/delete_test.go b/logs/rotator/delete_test.go deleted file mode 100644 index d7f6a09..0000000 --- a/logs/rotator/delete_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package rotator - -import ( - "testing" - - "github.com/kuoss/lethe/config" - "github.com/kuoss/lethe/testutil" - "github.com/stretchr/testify/assert" -) - -func Test_DeleteByAge_10d(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", "20d") - err := NewRotator().DeleteByAge() - assert.NoError(t, err) - - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - 
assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.FileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.FileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func Test_DeleteByAge_1d(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", "2d") - config.Viper().Set("retention.size", "100m") - err := NewRotator().DeleteByAge() - assert.NoError(t, err) - - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.FileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func Test_DeleteByAge_1h(t *testing.T) { - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", "1h") - config.Viper().Set("retention.size", "100m") - err := NewRotator().DeleteByAge() - assert.NoError(t, err) - - assert.NoFileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.NoFileExists(t, 
"./tmp/log/pod/namespace01/2000-01-01_00.log") - // assert.NoFileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func Test_DeleteBySize_1m(t *testing.T) { - testutil.SetTestLogFiles() - - config.Viper().Set("retention.size", "1m") - err := NewRotator().DeleteBySize() - assert.NoError(t, err) - - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.FileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.FileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.FileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.FileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func Test_DeleteBySize_3k(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.size", "3k") - err := NewRotator().DeleteBySize() - assert.NoError(t, err) - - assert.NoFileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} - -func 
Test_DeleteBySize_2k(t *testing.T) { - testutil.Init() - testutil.SetTestLogFiles() - - config.Viper().Set("retention.size", "2k") - err := NewRotator().DeleteBySize() - assert.NoError(t, err) - - assert.NoFileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.NoFileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} diff --git a/logs/rotator/list.go b/logs/rotator/list.go deleted file mode 100644 index 9780a71..0000000 --- a/logs/rotator/list.go +++ /dev/null @@ -1,154 +0,0 @@ -package rotator - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/kuoss/lethe/storage" -) - -// LogFile -type LogFile struct { - FullPath string - SubPath string - LogType string - Target string - Name string - Extension string - Size int64 -} - -type LogDir struct { - FullPath string - SubPath string - LogType string - Target string - FileCount int - FirstFile string - LastFile string - Size int64 - LastForward string -} - -func (rotator *Rotator) ListFiles() (logFiles []LogFile, err error) { - rootDirectory := rotator.driver.RootDirectory() - fileInfos, err := rotator.driver.Walk(rootDirectory) - if err != nil { - err = fmt.Errorf("error on Walk: %e", err) - return - } - - for _, fileInfo := range fileInfos { - logPath := storage.LogPath{RootDirectory: rootDirectory} - var rel string - rel, err = filepath.Rel(logPath.RootDirectory, fileInfo.Path()) - if err != nil { - err = fmt.Errorf("error on Rel: %w", err) - return - } - logPath.SetFullPath(rel) - - if logPath.Depth() 
== storage.FILE { - logFiles = append(logFiles, LogFile{ - FullPath: logPath.FullPath(), - SubPath: logPath.Filename(), - LogType: logPath.LogType(), - Target: logPath.Target(), - Name: logPath.Filename(), - Extension: filepath.Ext(logPath.Filename()), - Size: fileInfo.Size(), - }) - } - } - return -} - -func (rotator *Rotator) ListDirs() []LogDir { - var logDirs []LogDir - - rootDirecotry := rotator.driver.RootDirectory() - directories, err := rotator.driver.WalkDir(rootDirecotry) - if err != nil { - fmt.Println(err) - return logDirs - } - for _, dir := range directories { - logPath := storage.LogPath{RootDirectory: rootDirecotry} - logPath.SetFullPath(dir) - if logPath.Depth() == storage.TARGET { - logDirs = append(logDirs, LogDir{ - LogType: logPath.LogType(), - Target: logPath.Target(), - SubPath: dir, - FullPath: logPath.FullPath(), - }) - } - } - return logDirs -} - -func (rotator *Rotator) ListDirsWithSize() []LogDir { - logDirs := rotator.ListDirs() - for i, logDir := range logDirs { - var size int64 - size, err := rotator.DirSize(logDir.FullPath) - if err != nil { - fmt.Printf("Warning: cannot get size of directory: %s\n", logDir.FullPath) - } - logDirs[i].Size = size - - files, err := os.ReadDir(logDir.FullPath) - if err != nil { - fmt.Printf("Warning: cannot get logs count of directory: %s\n", logDir.FullPath) - continue - } - fileCount := len(files) - logDirs[i].FileCount = fileCount - if fileCount > 0 { - logDirs[i].FirstFile = files[0].Name() - logDirs[i].LastFile = files[fileCount-1].Name() - } - } - return logDirs -} - -func (rotator *Rotator) DirSize(path string) (int64, error) { - - files, err := rotator.driver.List(path) - if err != nil { - return 0, err - } - var size int64 - for _, file := range files { - info, err := rotator.driver.Stat(file) - if err != nil { - return 0, err - } - size += info.Size() - } - return size, err -} - -// ListTargets method returns ListDirWithSize() + LastForward(timestamp) -func (rotator *Rotator) ListTargets() 
[]LogDir { - - logDirs := rotator.ListDirsWithSize() - for i, logDir := range logDirs { - if logDir.LastFile == "" { - continue - } - - b, err := rotator.driver.GetContent(filepath.Join(logDir.FullPath, logDir.LastFile)) - if err != nil { - fmt.Println(err) - return nil - } - content := string(b) - - // todo - // if timestamp ? - logDirs[i].LastForward = content[:20] - } - return logDirs -} diff --git a/logs/rotator/list_test.go b/logs/rotator/list_test.go deleted file mode 100644 index 5ab5030..0000000 --- a/logs/rotator/list_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package rotator - -import ( - "fmt" - "runtime" - "testing" - - "github.com/kuoss/lethe/testutil" - "github.com/stretchr/testify/assert" -) - -func init() { - testutil.Init() -} - -func TestListDirs(t *testing.T) { - testutil.SetTestLogFiles() - - got := NewRotator().ListDirs() - - actual := fmt.Sprintf("%v", got) - expected := "[{tmp/log/node/node01 node/node01 node node01 0 0 } {tmp/log/node/node02 node/node02 node node02 0 0 } {tmp/log/pod/namespace01 pod/namespace01 pod namespace01 0 0 } {tmp/log/pod/namespace02 pod/namespace02 pod namespace02 0 0 }]" - if runtime.GOOS == "windows" { - expected = "[{tmp\\log\\node\\node01 node\\node01 node node01 0 0 } {tmp\\log\\node\\node02 node\\node02 node node02 0 0 } {tmp\\log\\pod\\namespace01 pod\\namespace01 pod namespace01 0 0 } {tmp\\log\\pod\\namespace02 pod\\namespace02 pod namespace02 0 0 }]" - } - assert.Equal(t, expected, actual) -} - -func TestListDirWithSize(t *testing.T) { - testutil.SetTestLogFiles() - - got := NewRotator().ListDirsWithSize() - - actual := fmt.Sprintf("%v", got) - expected := "[{tmp/log/node/node01 node/node01 node node01 2 2009-11-10_21.log 2009-11-10_22.log 1234 } {tmp/log/node/node02 node/node02 node node02 2 2009-11-01_00.log 2009-11-10_21.log 1116 } {tmp/log/pod/namespace01 pod/namespace01 pod namespace01 4 2000-01-01_00.log 2029-11-10_23.log 2620 } {tmp/log/pod/namespace02 pod/namespace02 pod namespace02 2 0000-00-00_00.log 
2009-11-10_22.log 1137 }]" - if runtime.GOOS == "windows" { - expected = "[{tmp\\log\\node\\node01 node\\node01 node node01 2 2009-11-10_21.log 2009-11-10_22.log 1248 } {tmp\\log\\node\\node02 node\\node02 node node02 2 2009-11-01_00.log 2009-11-10_21.log 1128 } {tmp\\log\\pod\\namespace01 pod\\namespace01 pod namespace01 4 2000-01-01_00.log 2029-11-10_23.log 2646 } {tmp\\log\\pod\\namespace02 pod\\namespace02 pod namespace02 2 0000-00-00_00.log 2009-11-10_22.log 1151 }]" - } - assert.Equal(t, expected, actual) -} - -func TestListFiles(t *testing.T) { - testutil.SetTestLogFiles() - - want := "[{tmp/log/node/node01/2009-11-10_21.log 2009-11-10_21.log node node01 2009-11-10_21.log .log 1057} {tmp/log/node/node01/2009-11-10_22.log 2009-11-10_22.log node node01 2009-11-10_22.log .log 177} {tmp/log/node/node02/2009-11-01_00.log 2009-11-01_00.log node node02 2009-11-01_00.log .log 0} {tmp/log/node/node02/2009-11-10_21.log 2009-11-10_21.log node node02 2009-11-10_21.log .log 1116} {tmp/log/pod/namespace01/2000-01-01_00.log 2000-01-01_00.log pod namespace01 2000-01-01_00.log .log 1031} {tmp/log/pod/namespace01/2009-11-10_21.log 2009-11-10_21.log pod namespace01 2009-11-10_21.log .log 279} {tmp/log/pod/namespace01/2009-11-10_22.log 2009-11-10_22.log pod namespace01 2009-11-10_22.log .log 1031} {tmp/log/pod/namespace01/2029-11-10_23.log 2029-11-10_23.log pod namespace01 2029-11-10_23.log .log 279} {tmp/log/pod/namespace02/0000-00-00_00.log 0000-00-00_00.log pod namespace02 0000-00-00_00.log .log 12} {tmp/log/pod/namespace02/2009-11-10_22.log 2009-11-10_22.log pod namespace02 2009-11-10_22.log .log 1125}]" - if runtime.GOOS == "windows" { - want = "[{tmp\\log\\node\\node01\\2009-11-10_21.log 2009-11-10_21.log node node01 2009-11-10_21.log .log 1068} {tmp\\log\\node\\node01\\2009-11-10_22.log 2009-11-10_22.log node node01 2009-11-10_22.log .log 180} {tmp\\log\\node\\node02\\2009-11-01_00.log 2009-11-01_00.log node node02 2009-11-01_00.log .log 0} 
{tmp\\log\\node\\node02\\2009-11-10_21.log 2009-11-10_21.log node node02 2009-11-10_21.log .log 1128} {tmp\\log\\pod\\namespace01\\2000-01-01_00.log 2000-01-01_00.log pod namespace01 2000-01-01_00.log .log 1041} {tmp\\log\\pod\\namespace01\\2009-11-10_21.log 2009-11-10_21.log pod namespace01 2009-11-10_21.log .log 282} {tmp\\log\\pod\\namespace01\\2009-11-10_22.log 2009-11-10_22.log pod namespace01 2009-11-10_22.log .log 1041} {tmp\\log\\pod\\namespace01\\2029-11-10_23.log 2029-11-10_23.log pod namespace01 2029-11-10_23.log .log 282} {tmp\\log\\pod\\namespace02\\0000-00-00_00.log 0000-00-00_00.log pod namespace02 0000-00-00_00.log .log 14} {tmp\\log\\pod\\namespace02\\2009-11-10_22.log 2009-11-10_22.log pod namespace02 2009-11-10_22.log .log 1137}]" - } - - got, err := NewRotator().ListFiles() - assert.NoError(t, err) - assert.Equal(t, want, fmt.Sprintf("%v", got)) -} - -func TestListTargets(t *testing.T) { - testutil.SetTestLogFiles() - - got := NewRotator().ListTargets() - - actual := fmt.Sprintf("%v", got) - expected := "[{tmp/log/node/node01 node/node01 node node01 2 2009-11-10_21.log 2009-11-10_22.log 1234 2009-11-10T23:00:00.} {tmp/log/node/node02 node/node02 node node02 2 2009-11-01_00.log 2009-11-10_21.log 1116 2009-11-10T21:58:00.} {tmp/log/pod/namespace01 pod/namespace01 pod namespace01 4 2000-01-01_00.log 2029-11-10_23.log 2620 2009-11-10T23:00:00.} {tmp/log/pod/namespace02 pod/namespace02 pod namespace02 2 0000-00-00_00.log 2009-11-10_22.log 1137 2009-11-10T22:58:00.}]" - if runtime.GOOS == "windows" { - expected = "[{tmp\\log\\node\\node01 node\\node01 node node01 2 2009-11-10_21.log 2009-11-10_22.log 1248 2009-11-10T23:00:00.} {tmp\\log\\node\\node02 node\\node02 node node02 2 2009-11-01_00.log 2009-11-10_21.log 1128 2009-11-10T21:58:00.} {tmp\\log\\pod\\namespace01 pod\\namespace01 pod namespace01 4 2000-01-01_00.log 2029-11-10_23.log 2646 2009-11-10T23:00:00.} {tmp\\log\\pod\\namespace02 pod\\namespace02 pod namespace02 2 0000-00-00_00.log 
2009-11-10_22.log 1151 2009-11-10T22:58:00.}]" - } - assert.Equal(t, expected, actual) -} diff --git a/logs/rotator/rotator.go b/logs/rotator/rotator.go deleted file mode 100644 index f4b9be7..0000000 --- a/logs/rotator/rotator.go +++ /dev/null @@ -1,48 +0,0 @@ -package rotator - -import ( - "log" - "time" - - "github.com/kuoss/common/logger" - "github.com/kuoss/lethe/config" - "github.com/kuoss/lethe/storage/driver" - "github.com/kuoss/lethe/storage/driver/factory" -) - -type Rotator struct { - driver driver.StorageDriver -} - -func NewRotator() *Rotator { - d, _ := factory.Get(config.Viper().GetString("storage.driver"), map[string]interface{}{"RootDirectory": config.GetLogDataPath()}) - return &Rotator{driver: d} -} - -func (rotator *Rotator) Start(interval time.Duration) { - go rotator.routineLoop(interval) -} - -func (rotator *Rotator) routineLoop(interval time.Duration) { - for { - rotator.RunOnce() - log.Printf("routineLoop... sleep %s\n", interval) - time.Sleep(interval) - } -} - -func (rotator *Rotator) RunOnce() { - var err error - - err = rotator.DeleteByAge() - if err != nil { - logger.Errorf("error on DeleteByAge: %s", err) - } - - err = rotator.DeleteBySize() - if err != nil { - logger.Errorf("error on DeleteBySize: %s", err) - } - - rotator.Cleansing() -} diff --git a/logs/rotator/rotator_test.go b/logs/rotator/rotator_test.go deleted file mode 100644 index 32a8e77..0000000 --- a/logs/rotator/rotator_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package rotator - -import ( - "testing" - - "github.com/kuoss/lethe/config" - "github.com/kuoss/lethe/testutil" - "github.com/stretchr/testify/assert" -) - -func Test_RoutineDelete_100m_50k(t *testing.T) { - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", "100m") - config.Viper().Set("retention.size", "50k") - NewRotator().RunOnce() - - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.NoFileExists(t, 
"./tmp/log/node/node02/2009-11-01_00.log") - assert.FileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace02/2009-11-10_22.log") - -} - -func Test_RoutineDelete_100m_3k(t *testing.T) { - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", "100m") - config.Viper().Set("retention.size", "3k") - NewRotator().RunOnce() - - assert.NoFileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace02/2009-11-10_22.log") -} - -func Test_RoutineDelete_1d(t *testing.T) { - testutil.SetTestLogFiles() - - config.Viper().Set("retention.time", "1d") - config.Viper().Set("retention.size", "100g") - NewRotator().RunOnce() - - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/node/node01/2009-11-10_22.log") - - assert.NoFileExists(t, "./tmp/log/node/node02/2009-11-01_00.log") - assert.FileExists(t, "./tmp/log/node/node02/2009-11-10_21.log") - - assert.NoFileExists(t, "./tmp/log/pod/namespace01/2000-01-01_00.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_21.log") - assert.FileExists(t, "./tmp/log/pod/namespace01/2009-11-10_22.log") - - 
assert.NoFileExists(t, "./tmp/log/pod/namespace02/0000-00-00_00.log") - assert.NoFileExists(t, "./tmp/log/pod/namespace02/2009-11-10_21.log") -} diff --git a/logs/rotator/used_size.go b/logs/rotator/used_size.go deleted file mode 100644 index b54e4c2..0000000 --- a/logs/rotator/used_size.go +++ /dev/null @@ -1,24 +0,0 @@ -package rotator - -import ( - "github.com/kuoss/lethe/config" -) - -func (rotator *Rotator) GetUsedBytes(path string) (int, error) { - if config.Viper().GetString("retention.sizingStrategy") == "disk" { - return rotator.GetDiskUsedBytes(path) - } - return rotator.GetFilesUsedBytes(path) -} - -func (rotator *Rotator) GetFilesUsedBytes(path string) (int, error) { - fileInfos, err := rotator.driver.Walk(path) - if err != nil { - return 0, err - } - var size int - for _, fileInfo := range fileInfos { - size += int(fileInfo.Size()) - } - return size, err -} diff --git a/logs/rotator/used_size_disk_linux.go b/logs/rotator/used_size_disk_linux.go deleted file mode 100644 index 8f2863d..0000000 --- a/logs/rotator/used_size_disk_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build linux -// +build linux - -package rotator - -import ( - "golang.org/x/sys/unix" -) - -func (rotator *Rotator) GetDiskUsedBytes(path string) (int, error) { - var stat unix.Statfs_t - err := unix.Statfs(path, &stat) - if err != nil { - return 0, err - } - return int(stat.Blocks - stat.Bavail*uint64(stat.Bsize)), nil -} diff --git a/logs/rotator/used_size_disk_windows.go b/logs/rotator/used_size_disk_windows.go deleted file mode 100644 index cdeb272..0000000 --- a/logs/rotator/used_size_disk_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build windows -// +build windows - -package rotator - -import ( - "fmt" -) - -// "golang.org/x/sys/windows" - -func (rotator *Rotator) GetDiskUsedBytes(path string) (int, error) { - fmt.Println("Currently getDiskAvailableBytes is not supported on Windows.") - return 99999999, nil - // var free, total, avail uint64 - // pathPtr, err := 
windows.UTF16PtrFromString(path) - // if err != nil { - // return 0, err - // } - // err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail) - // if err != nil { - // return 0, err - // } - // return int(total - avail), nil -} diff --git a/logs/rotator/used_size_test.go b/logs/rotator/used_size_test.go deleted file mode 100644 index 980fd13..0000000 --- a/logs/rotator/used_size_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package rotator - -import ( - "testing" - - "github.com/kuoss/lethe/testutil" - "github.com/stretchr/testify/assert" -) - -func Test_GetDiskUsedBytes(t *testing.T) { - testutil.SetTestLogFiles() - - rotator := NewRotator() - - actual, err := rotator.GetDiskUsedBytes(rotator.driver.RootDirectory()) - if err != nil { - t.Fatal(err) - } - assert.NotZero(t, actual) -} diff --git a/main.go b/main.go index fb5671d..84b8dee 100644 --- a/main.go +++ b/main.go @@ -5,26 +5,43 @@ import ( "github.com/kuoss/common/logger" "github.com/kuoss/lethe/config" - "github.com/kuoss/lethe/logs/rotator" + "github.com/kuoss/lethe/handler" + "github.com/kuoss/lethe/rotator" + "github.com/kuoss/lethe/storage/fileservice" + "github.com/kuoss/lethe/storage/logservice" + "github.com/kuoss/lethe/storage/queryservice" ) var ( - Version = "unknown" + Version = "development" ) func main() { + logger.Infof("🌊 lethe starting... 
version: %s", Version) - err := config.LoadConfig() + + // config + cfg, err := config.New(Version) + if err != nil { + logger.Fatalf("new config err: %s", err.Error()) + } + + // services + fileService, err := fileservice.New(cfg) if err != nil { - logger.Fatalf("error on LoadConfig: %s", err) + logger.Fatalf("new fileservice err: %s", err.Error()) } + logService := logservice.New(fileService) + queryService := queryservice.New(logService) - rotator := rotator.NewRotator() + // start rotator + rotator := rotator.New(cfg, fileService) rotator.Start(time.Duration(20) * time.Minute) // 20 minutes - router := NewRouter() - err = router.Run(config.GetWebListenAddress()) + // run handler + h := handler.New(cfg, fileService, queryService) + err = h.Run() if err != nil { - logger.Fatalf("error on Run: %s", err) + logger.Fatalf("handler run err: %s", err.Error()) } } diff --git a/rotator/rotator.go b/rotator/rotator.go new file mode 100644 index 0000000..4cf5aeb --- /dev/null +++ b/rotator/rotator.go @@ -0,0 +1,42 @@ +package rotator + +import ( + "time" + + "github.com/kuoss/common/logger" + "github.com/kuoss/lethe/config" + "github.com/kuoss/lethe/storage/fileservice" +) + +type Rotator struct { + config *config.Config + fileService *fileservice.FileService +} + +func New(cfg *config.Config, fileService *fileservice.FileService) *Rotator { + return &Rotator{cfg, fileService} +} + +func (r *Rotator) Start(interval time.Duration) { + go r.routineLoop(interval) +} + +func (r *Rotator) routineLoop(interval time.Duration) { + for { + r.RunOnce() + logger.Infof("routineLoop... 
sleep %s", interval) + time.Sleep(interval) + } +} + +func (r *Rotator) RunOnce() { + err := r.fileService.DeleteByAge() + if err != nil { + logger.Errorf("deleteByAge err: %s", err.Error()) + } + err = r.fileService.DeleteBySize() + if err != nil { + logger.Errorf("deleteBySize err: %s", err.Error()) + } + r.fileService.Clean() +} diff --git a/rotator/rotator_test.go b/rotator/rotator_test.go new file mode 100644 index 0000000..bd6e422 --- /dev/null +++ b/rotator/rotator_test.go @@ -0,0 +1,103 @@ +package rotator + +import ( + "fmt" + "testing" + "time" + + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/config" + "github.com/kuoss/lethe/storage/fileservice" + "github.com/kuoss/lethe/util/testutil" + "github.com/stretchr/testify/assert" +) + +var ( + rotator1 *Rotator +) + +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + clock.SetPlaygroundMode(true) + + cfg, err := config.New("test") + if err != nil { + panic(err) + } + cfg.SetLogDataPath("tmp/rotator_rotator_test") + fileService, err := fileservice.New(cfg) + if err != nil { + panic(err) + } + rotator1 = New(cfg, fileService) +} + +func TestRunOnce(t *testing.T) { + testCases := []struct { + retentionSize int + retentionTime time.Duration + want []fileservice.LogFile + }{ + { + 0, + 0, + []fileservice.LogFile{ + {Fullpath: "tmp/rotator_rotator_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/rotator_rotator_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/rotator_rotator_test/node/node02/2009-11-01_00.log", Subpath: "node/node02/2009-11-01_00.log", LogType: "node", Target: "node02", Name: "2009-11-01_00.log", Extension: ".log", Size: 0}, + {Fullpath: "tmp/rotator_rotator_test/node/node02/2009-11-10_21.log", 
Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2000-01-01_00.log", Subpath: "pod/namespace01/2000-01-01_00.log", LogType: "pod", Target: "namespace01", Name: "2000-01-01_00.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace02/0000-00-00_00.log", Subpath: "pod/namespace02/0000-00-00_00.log", LogType: "pod", Target: "namespace02", Name: "0000-00-00_00.log", Extension: ".log", Size: 12}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 10, // 10 bytes + 100 * time.Hour, // 100 hours + []fileservice.LogFile{}, + }, + { + 2 * 1024, // 2 KiB + 100 * time.Hour, // 100 hours + []fileservice.LogFile{ + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", 
Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 3 * 1024, // 3 KiB + 100 * time.Hour, // 100 hours + []fileservice.LogFile{ + {Fullpath: "tmp/rotator_rotator_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 100 * 1024 * 1024 * 1024, // 100 GiB + 2 * 24 * time.Hour, // 2 days + []fileservice.LogFile{ + {Fullpath: "tmp/rotator_rotator_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/rotator_rotator_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/rotator_rotator_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: 
"tmp/rotator_rotator_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/rotator_rotator_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + rotator1.config.SetRetentionSize(tc.retentionSize) + rotator1.config.SetRetentionTime(tc.retentionTime) + rotator1.RunOnce() + got, err := rotator1.fileService.ListFiles() + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + testutil.ResetLogData() + }) + } +} diff --git a/routes.go b/routes.go deleted file mode 100644 index 28e7b93..0000000 --- a/routes.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "github.com/gin-gonic/gin" - "github.com/kuoss/lethe/handlers" - "github.com/kuoss/lethe/logs/rotator" -) - -func NewRouter() *gin.Engine { - r := gin.Default() - routesRootGroup(r) - routesAPIV1Group(r) - return r -} - -func routesRootGroup(r *gin.Engine) { - - r.GET("/ping", func(c *gin.Context) { - c.JSON(200, gin.H{"message": "pong"}) - }) - -} - -func routesAPIV1Group(r *gin.Engine) { - - v1 := r.Group("api/v1") - h := handlers.LetheHandler{Rotator: rotator.NewRotator()} - - v1.GET("query", h.Query) - v1.GET("query_range", h.QueryRange) - v1.GET("metadata", h.Metadata) - v1.GET("targets", h.Target) -} 
diff --git a/routes_test.go b/routes_test.go deleted file mode 100644 index 4edd4c2..0000000 --- a/routes_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/kuoss/lethe/logs/logStore" - - "github.com/gin-gonic/gin" - "github.com/kuoss/lethe/testutil" - "github.com/stretchr/testify/assert" -) - -var r *gin.Engine -var w *httptest.ResponseRecorder - -type Params map[string]string - -func init() { - testutil.Init() - testutil.SetTestLogFiles() - r = NewRouter() - w = httptest.NewRecorder() -} - -func GET(url string, params Params) string { - req, _ := http.NewRequest("GET", url, nil) - - if params != nil { - q := req.URL.Query() - for k, v := range params { - q.Add(k, v) - } - req.URL.RawQuery = q.Encode() - } - r.ServeHTTP(w, req) - body, _ := io.ReadAll(w.Body) - return string(body) -} - -func Test_Routes_Query(t *testing.T) { - prefix := "01_" - tests := map[string]struct { - url string - param Params - want string - }{ - // modify test case.. 
for supoorting sort by time - "01_ping": {url: "/ping", param: nil, - want: "{\"message\":\"pong\"}"}, - "02_empty_query": {url: "/api/v1/query", param: Params{"query": ``}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "03_unknown_query": {url: "/api/v1/query", param: Params{"query": `hello`}, - want: "{\"error\":\"unknown log name\",\"status\":\"error\"}"}, - "04_query_namespace01": {url: "/api/v1/query", param: Params{"query": `pod{namespace="namespace01"}`}, - want: "{\"data\":{\"result\":[\"2009-11-10T21:00:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\",\"2009-11-10T21:01:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\",\"2009-11-10T21:02:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\",\"2009-11-10T22:56:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:56:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:56:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] lerom from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:59:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] lerom ipsum\",\"2009-11-10T22:59:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\"],\"resultType\":\"logs\"},\"status\":\"success\"}"}, - "05_query_namespace02": {url: "/api/v1/query", param: Params{"query": `pod{namespace="namespace02"}`}, - want: 
"{\"data\":{\"result\":[\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|nginx] hello world\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|nginx] lerom ipsum\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|nginx] hello world\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|sidecar] lerom from sidecar\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\"],\"resultType\":\"logs\"},\"status\":\"success\"}"}, - "06_query_namespace03": {url: "/api/v1/query", param: Params{"query": `pod{namespace="namespace03"}`}, - want: "{\"data\":{\"result\":null,\"resultType\":\"logs\"},\"status\":\"success\"}"}, - "07_query_with_duration": {url: "/api/v1/query", param: Params{"query": `pod{namespace="namespace01"}[2m]`}, - want: "{\"data\":{\"result\":[\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] lerom from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:59:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] lerom ipsum\",\"2009-11-10T22:59:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello 
world\"],\"resultType\":\"logs\"},\"status\":\"success\"}"}, - "08_query_with_count_over_time_function": {url: "/api/v1/query", param: Params{"query": `count_over_time(pod{namespace="namespace01"}[2m])`}, - want: "{\"data\":{\"result\":[{\"value\":5}],\"resultType\":\"vector\"},\"status\":\"success\"}"}, - "09_query_with_count_over_time_function_and_binary_operator": {url: "/api/v1/query", param: Params{"query": `count_over_time(pod{namespace="namespace02"}[2m])>0`}, - want: "{\"data\":{\"result\":[{\"value\":1}],\"resultType\":\"vector\"},\"status\":\"success\"}"}, - "10_query_namespace03_with_count_over_time_function_and_binary_operator": {url: "/api/v1/query", param: Params{"query": `count_over_time(pod{namespace="namespace03"}[2m])>0`}, - want: "{\"data\":{\"result\":[],\"resultType\":\"vector\"},\"status\":\"success\"}"}, - } - - for name, tt := range tests { - t.Run(prefix+name, func(subt *testing.T) { - got := GET(tt.url, tt.param) - assert.Equal(subt, tt.want, got) - }) - } -} - -func Test_Routes_QueryRange_Without_StartEnd(t *testing.T) { - prefix := "02_" - tests := map[string]struct { - url string - param Params - want string - }{ - // modify test case.. 
for supoorting sort by time - "01_empty_query": {url: "/api/v1/query_range", param: Params{"query": ``}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "02_unknown_query": {url: "/api/v1/query_range", param: Params{"query": `hello`}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "03_query_namespace01": {url: "/api/v1/query_range", param: Params{"query": `pod{namespace="namespace01"}`}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "04_query_namespace02": {url: "/api/v1/query_range", param: Params{"query": `pod{namespace="namespace02"}`}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "05_query_namespace03": {url: "/api/v1/query_range", param: Params{"query": `pod{namespace="namespace03"}`}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - - "06_query_with_duration": {url: "/api/v1/query_range", param: Params{"query": `pod{namespace="namespace02"}[2m]`}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "07_query_with_count_over_time_function": {url: "/api/v1/query_range", param: Params{"query": `count_over_time(pod{namespace="namespace01"}[2m])`}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "08_query_with_count_over_time_function_and_binary_operator": {url: "/api/v1/query_range", param: Params{"query": `count_over_time(pod{namespace="namespace01"}[2m])>0`}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "09_query_namespace03_with_count_over_time_function_and_binary_operator": {url: "/api/v1/query_range", param: Params{"query": `count_over_time(pod{namespace="namespace03"}[2m])>0`}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - } - - for name, tt := range tests { - t.Run(prefix+name, func(subt *testing.T) { - t.Log(tt.url, tt.param) - got := GET(tt.url, tt.param) - assert.Equal(subt, tt.want, got) - }) - } -} - -func Test_Routes_QueryRange_With_StartEnd(t *testing.T) { - startTime, _ := time.Parse(time.RFC3339, 
"2009-11-10T22:57:00Z") - endTime, _ := time.Parse(time.RFC3339, "2009-11-10T22:58:00Z") - start := fmt.Sprint(startTime.Unix()) - end := fmt.Sprint(endTime.Unix()) - - prefix := "03_" - tests := map[string]struct { - url string - param Params - want string - }{ - // modify test case.. for supoorting sort by time - "01_empty_query": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": ``}, - want: "{\"error\":\"empty query\",\"status\":\"error\"}"}, - "02_unknown_query": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": `hello`}, - want: "{\"error\":\"unknown log name\",\"status\":\"error\"}"}, - "03_query_namespace01": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": `pod{namespace="namespace01"}`}, - want: "{\"data\":{\"result\":[\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] lerom from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\"],\"resultType\":\"logs\"},\"status\":\"success\"}"}, - "04_query_namespace02": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": `pod{namespace="namespace02"}`}, - want: "{\"data\":{\"result\":[\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|nginx] hello world\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|nginx] lerom ipsum\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|nginx] hello world\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|sidecar] hello from 
sidecar\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|sidecar] lerom from sidecar\",\"2009-11-10T22:58:00Z[namespace02|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace02|apache-75675f5897-7ci7o|httpd] hello from sidecar\"],\"resultType\":\"logs\"},\"status\":\"success\"}"}, - "05_query_namespace03": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": `pod{namespace="namespace03"}`}, - want: "{\"data\":{\"result\":null,\"resultType\":\"logs\"},\"status\":\"success\"}"}, - - "06_query_with_duration": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": `pod{namespace="namespace01"}[2m]`}, - want: "{\"data\":{\"result\":[\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:57:00Z[namespace01|apache-75675f5897-7ci7o|httpd] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] lerom from sidecar\",\"2009-11-10T22:58:00Z[namespace01|nginx-deployment-75675f5897-7ci7o|sidecar] hello from sidecar\"],\"resultType\":\"logs\"},\"status\":\"success\"}"}, - "07_query_with_count_over_time_function": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": `count_over_time(pod{namespace="namespace01"}[2m])`}, - want: 
"{\"data\":{\"result\":[{\"value\":6}],\"resultType\":\"vector\"},\"status\":\"success\"}"}, - "08_query_with_count_over_time_function_and_binary_operator": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": `count_over_time(pod{namespace="namespace01"}[2m])>0`}, - want: "{\"data\":{\"result\":[{\"value\":1}],\"resultType\":\"vector\"},\"status\":\"success\"}"}, - "09_query_namespace03_with_count_over_time_function_and_binary_operator": {url: "/api/v1/query_range", param: Params{"start": start, "end": end, "query": `count_over_time(pod{namespace="namespace03"}[2m])>0`}, - want: "{\"data\":{\"result\":[],\"resultType\":\"vector\"},\"status\":\"success\"}"}, - } - - for name, tt := range tests { - t.Run(prefix+name, func(subt *testing.T) { - t.Log(tt.url, tt.param) - got := GET(tt.url, tt.param) - assert.Equal(subt, tt.want, got) - }) - } -} - -type TempDTO struct { - Status string `json:"status,omitempty"` - Data struct { - ResultType string `json:"resultType,omitempty"` - Result []logStore.PodLog `json:"result,omitempty"` - } `json:"data"` -} - -func Test_Routes_Query_json(t *testing.T) { - prefix := "04_" - tests := map[string]struct { - url string - param Params - want TempDTO - }{ - // modify test case.. 
for supoorting sort by time - "04_query_namespace01": {url: "/api/v1/query", param: Params{"query": `pod{namespace="namespace01"}`, "logFormat": "json"}, - want: TempDTO{Status: "success", Data: struct { - ResultType string "json:\"resultType,omitempty\"" - Result []logStore.PodLog "json:\"result,omitempty\"" - }{ResultType: "logs", Result: []logStore.PodLog{logStore.PodLog{Name: "", Time: "2009-11-10T21:00:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:01:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T21:02:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:56:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:57:00Z", Namespace: "namespace01", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello 
from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}}, - "05_query_namespace02": {url: "/api/v1/query", param: Params{"query": `pod{namespace="namespace02"}`, "logFormat": "json"}, - want: TempDTO{Status: "success", Data: struct { - ResultType string "json:\"resultType,omitempty\"" - Result []logStore.PodLog "json:\"result,omitempty\"" - }{ResultType: "logs", Result: []logStore.PodLog{logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: 
"nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace02", Pod: "apache-75675f5897-7ci7o", Container: "httpd", Log: " hello from sidecar"}}}}}, - "06_query_namespace03": {url: "/api/v1/query", param: Params{"query": `pod{namespace="namespace03"}`, "logFormat": "json"}, - want: TempDTO{Status: "success", Data: struct { - ResultType string "json:\"resultType,omitempty\"" - Result []logStore.PodLog "json:\"result,omitempty\"" - }{ResultType: "logs", Result: []logStore.PodLog{}}}}, - "07_query_with_duration": {url: "/api/v1/query", param: Params{"query": `pod{namespace="namespace01"}[2m]`, "logFormat": "json"}, - want: TempDTO{Status: "success", Data: struct { - ResultType string "json:\"resultType,omitempty\"" - Result []logStore.PodLog "json:\"result,omitempty\"" - }{ResultType: "logs", Result: []logStore.PodLog{logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: 
"namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " lerom from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:58:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "sidecar", Log: " hello from sidecar"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " lerom ipsum"}, logStore.PodLog{Name: "", Time: "2009-11-10T22:59:00Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: " hello world"}}}}}, - "08_query_with_count_over_time_function": {url: "/api/v1/query", param: Params{"query": `count_over_time(pod{namespace="namespace01"}[2m])`, "logFormat": "json"}, - want: TempDTO{Status: "success", Data: struct { - ResultType string "json:\"resultType,omitempty\"" - Result []logStore.PodLog "json:\"result,omitempty\"" - }{ResultType: "vector", Result: []logStore.PodLog{logStore.PodLog{Name: "", Time: "", Namespace: "", Pod: "", Container: "", Log: ""}}}}}, - "09_query_with_count_over_time_function_and_binary_operator": {url: "/api/v1/query", param: Params{"query": `count_over_time(pod{namespace="namespace02"}[2m])>0`, "logFormat": "json"}, - want: TempDTO{Status: "success", Data: struct { - ResultType string "json:\"resultType,omitempty\"" - Result []logStore.PodLog "json:\"result,omitempty\"" - }{ResultType: "vector", Result: []logStore.PodLog{logStore.PodLog{Name: "", Time: "", Namespace: "", Pod: "", Container: "", Log: ""}}}}}, - "10_query_namespace03_with_count_over_time_function_and_binary_operator": {url: "/api/v1/query", param: Params{"query": `count_over_time(pod{namespace="namespace03"}[2m])>0`, "logFormat": "json"}, - want: TempDTO{Status: "success", Data: struct { - ResultType string "json:\"resultType,omitempty\"" - Result []logStore.PodLog "json:\"result,omitempty\"" - }{ResultType: "vector", Result: []logStore.PodLog{}}}, - }, - } - - for 
name, tt := range tests { - t.Run(prefix+name, func(subt *testing.T) { - got := GET(tt.url, tt.param) - var gotDTO TempDTO - err := json.Unmarshal([]byte(got), &gotDTO) - if err != nil { - t.Fail() - } - assert.Equal(subt, tt.want, gotDTO) - }) - } -} diff --git a/storage/driver/storagedriver.go b/storage/driver/driver.go similarity index 58% rename from storage/driver/storagedriver.go rename to storage/driver/driver.go index 8ae9136..c300140 100644 --- a/storage/driver/storagedriver.go +++ b/storage/driver/driver.go @@ -2,10 +2,9 @@ package driver import ( "io" - "time" ) -type StorageDriver interface { +type Driver interface { Name() string GetContent(string) ([]byte, error) PutContent(string, []byte) error @@ -17,20 +16,5 @@ type StorageDriver interface { Delete(string) error Walk(string) ([]FileInfo, error) WalkDir(string) ([]string, error) - //WalkDirWithDepth(from string, depth int) ([]string, error) RootDirectory() string } - -type FileWriter interface { - io.WriteCloser - Size() int64 - Cancel() error - Commit() error -} - -type FileInfo interface { - Path() string - Size() int64 - ModTime() time.Time - IsDir() bool -} diff --git a/storage/error.go b/storage/driver/error.go similarity index 85% rename from storage/error.go rename to storage/driver/error.go index c2ece5c..2d8bbad 100644 --- a/storage/error.go +++ b/storage/driver/error.go @@ -1,9 +1,10 @@ -package storage +package driver import "fmt" type PathNotFoundError struct { Path string + Err error } func (err PathNotFoundError) Error() string { diff --git a/storage/driver/error_test.go b/storage/driver/error_test.go new file mode 100644 index 0000000..f048a57 --- /dev/null +++ b/storage/driver/error_test.go @@ -0,0 +1,13 @@ +package driver + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestError(t *testing.T) { + err := PathNotFoundError{Path: "/tmp"} + got := err.Error() + assert.Equal(t, "Path not found: /tmp", got) +} diff --git a/storage/driver/factory/factory.go 
b/storage/driver/factory/factory.go index f3c809d..474a421 100644 --- a/storage/driver/factory/factory.go +++ b/storage/driver/factory/factory.go @@ -3,30 +3,30 @@ package factory import ( "fmt" - "github.com/kuoss/lethe/storage/driver" + storagedriver "github.com/kuoss/lethe/storage/driver" ) var driverFactories = make(map[string]StorageDriverFactory) type StorageDriverFactory interface { - Create(parameters map[string]interface{}) (driver.StorageDriver, error) + Create(parameters map[string]interface{}) (storagedriver.Driver, error) } -func Register(name string, factory StorageDriverFactory) { +func Register(name string, factory StorageDriverFactory) error { if factory == nil { - panic("Must not provide nil StorageDriverFactory") + return fmt.Errorf("factory is nil") } - _, registered := driverFactories[name] - if registered { - panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) + if _, exist := driverFactories[name]; exist { + return fmt.Errorf("factory name duplicated: %s", name) } driverFactories[name] = factory + return nil } -func Get(name string, parameters map[string]interface{}) (driver.StorageDriver, error) { +func Get(name string, parameters map[string]interface{}) (storagedriver.Driver, error) { driverFactory, ok := driverFactories[name] if !ok { - return nil, fmt.Errorf("invalid StorageDriver named %s", name) + return nil, fmt.Errorf("invalid driver name: %s", name) } return driverFactory.Create(parameters) } diff --git a/storage/driver/factory/factory_test.go b/storage/driver/factory/factory_test.go new file mode 100644 index 0000000..ff46f6a --- /dev/null +++ b/storage/driver/factory/factory_test.go @@ -0,0 +1,116 @@ +package factory + +import ( + "fmt" + "testing" + + "github.com/kuoss/lethe/storage/driver" + "github.com/kuoss/lethe/storage/driver/factory/fake" + "github.com/stretchr/testify/assert" +) + +// fakeDriverFactory here: to avoid imports cycle +type fakeDriverFactory struct{} + +func (factory *fakeDriverFactory) 
Create(parameters map[string]interface{}) (driver.Driver, error) { + return fake.New(), nil +} + +var ( + fakeDriverFactory1 = &fakeDriverFactory{} +) + +func unregisterAll() { + driverFactories = map[string]StorageDriverFactory{} +} + +func TestRegister(t *testing.T) { + assert.NotNil(t, fakeDriverFactory1) + testCases := []struct { + name string + factory StorageDriverFactory + wantError string + wantNames []string + }{ + { + "", nil, + "factory is nil", + []string{}, + }, + { + "", fakeDriverFactory1, + "", + []string{""}, + }, + { + "fake1", fakeDriverFactory1, + "", + []string{"fake1"}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + unregisterAll() + err := Register(tc.name, tc.factory) + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + names := []string{} + for k := range driverFactories { + names = append(names, k) + } + assert.Equal(t, tc.wantNames, names) + }) + } +} + +func TestRegister_NameDuplicated(t *testing.T) { + unregisterAll() + err := Register("fake1", fakeDriverFactory1) + assert.NoError(t, err) + err = Register("fake1", fakeDriverFactory1) + assert.EqualError(t, err, "factory name duplicated: fake1") +} + +func TestGet(t *testing.T) { + unregisterAll() + err := Register("fake1", fakeDriverFactory1) + assert.NoError(t, err) + + testCases := []struct { + name string + parameters map[string]interface{} + want string + wantError string + }{ + { + "", map[string]interface{}{}, + "", + "invalid driver name: ", + }, + { + "fake1", map[string]interface{}{}, + "&fake.driver{}", + "", + }, + { + "fake2", map[string]interface{}{}, + "", + "invalid driver name: fake2", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + driver, err := Get(tc.name, tc.parameters) + got := fmt.Sprintf("%#v", driver) + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + 
assert.Equal(t, tc.want, got) + }) + } +} diff --git a/storage/driver/factory/fake/driver.go b/storage/driver/factory/fake/driver.go new file mode 100644 index 0000000..2253b31 --- /dev/null +++ b/storage/driver/factory/fake/driver.go @@ -0,0 +1,53 @@ +package fake + +import ( + "io" + + storagedriver "github.com/kuoss/lethe/storage/driver" +) + +const ( + driverName = "fake" +) + +type driver struct{} + +func New() storagedriver.Driver { + return &driver{} +} +func (*driver) Name() string { + return driverName +} +func (*driver) GetContent(string) ([]byte, error) { + return []byte{}, nil +} +func (*driver) PutContent(string, []byte) error { + return nil +} +func (*driver) Reader(string) (io.ReadCloser, error) { + return &io.PipeReader{}, nil +} +func (*driver) Stat(string) (storagedriver.FileInfo, error) { + return nil, nil +} +func (*driver) List(string) ([]string, error) { + return []string{}, nil +} +func (*driver) Move(string, string) error { + return nil +} +func (*driver) Delete(s string) error { + return nil +} +func (*driver) Walk(string) ([]storagedriver.FileInfo, error) { + return nil, nil +} +func (*driver) WalkDir(string) ([]string, error) { + return []string{}, nil +} +func (*driver) Writer(string) (storagedriver.FileWriter, error) { + return nil, nil +} +func (*driver) RootDirectory() string { + return "" +} diff --git a/storage/driver/factory/fake/driver_test.go b/storage/driver/factory/fake/driver_test.go new file mode 100644 index 0000000..dbde884 --- /dev/null +++ b/storage/driver/factory/fake/driver_test.go @@ -0,0 +1,86 @@ +package fake + +import ( + "fmt" + "io" + "testing" + + storagedriver "github.com/kuoss/lethe/storage/driver" + "github.com/stretchr/testify/assert" +) + +var ( + driver1 = New() +) + +func TestNew(t *testing.T) { + assert.NotNil(t, driver1) + assert.Equal(t, "*fake.driver", fmt.Sprintf("%T", driver1)) +} + +func TestName(t *testing.T) { + got := driver1.Name() + assert.Equal(t, "fake", got) +} + +func TestGetContent(t 
*testing.T) { + got, err := driver1.GetContent("") + assert.NoError(t, err) + assert.Equal(t, []byte{}, got) +} + +func TestPutContent(t *testing.T) { + err := driver1.PutContent("", []byte{}) + assert.NoError(t, err) +} + +func TestReader(t *testing.T) { + got, err := driver1.Reader("") + assert.NoError(t, err) + assert.Equal(t, &io.PipeReader{}, got) +} + +func TestStat(t *testing.T) { + got, err := driver1.Stat("") + assert.NoError(t, err) + assert.Equal(t, nil, got) +} + +func TestList(t *testing.T) { + got, err := driver1.List("") + assert.NoError(t, err) + assert.Equal(t, []string{}, got) +} + +func TestMove(t *testing.T) { + err := driver1.Move("", "") + assert.NoError(t, err) +} + +func TestDelete(t *testing.T) { + err := driver1.Delete("") + assert.NoError(t, err) +} + +func TestWalk(t *testing.T) { + got, err := driver1.Walk("") + assert.NoError(t, err) + assert.Equal(t, ([]storagedriver.FileInfo)(nil), got) +} + +func TestWalkDir(t *testing.T) { + got, err := driver1.WalkDir("") + assert.NoError(t, err) + assert.Equal(t, []string{}, got) +} + +func TestWriter(t *testing.T) { + got, err := driver1.Writer("") + assert.NoError(t, err) + assert.Equal(t, nil, got) +} + +func TestRootDirectory(t *testing.T) { + got := driver1.RootDirectory() + assert.Equal(t, "", got) +} diff --git a/storage/driver/fileinfo.go b/storage/driver/fileinfo.go new file mode 100644 index 0000000..8850a14 --- /dev/null +++ b/storage/driver/fileinfo.go @@ -0,0 +1,12 @@ +package driver + +import ( + "time" +) + +type FileInfo interface { + Fullpath() string + Size() int64 + ModTime() time.Time + IsDir() bool +} diff --git a/testutil/testenv_disk_linux.go b/storage/driver/filesystem/disk/disk.go similarity index 91% rename from testutil/testenv_disk_linux.go rename to storage/driver/filesystem/disk/disk.go index 91baaf6..2d789e9 100644 --- a/testutil/testenv_disk_linux.go +++ b/storage/driver/filesystem/disk/disk.go @@ -1,7 +1,7 @@ //go:build linux // +build linux -package testutil 
+package disk import ( "fmt" @@ -11,7 +11,7 @@ import ( "golang.org/x/sys/unix" ) -func getDiskAvailableBytes(path string) (string, error) { +func GetDiskAvailableBytes(path string) (string, error) { // get absolute path absPath, err := filepath.Abs(path) if err != nil { diff --git a/storage/driver/filesystem/disk/disk_windows.go b/storage/driver/filesystem/disk/disk_windows.go new file mode 100644 index 0000000..bfcea24 --- /dev/null +++ b/storage/driver/filesystem/disk/disk_windows.go @@ -0,0 +1,23 @@ +//go:build windows +// +build windows + +package disk + +import ( + "fmt" + + "golang.org/x/sys/windows" +) + +func GetDiskAvailableBytes(path string) (string, error) { + var free, total, available uint64 + pathPtr, err := windows.UTF16PtrFromString(path) + if err != nil { + return "", fmt.Errorf("cannot get utf16ptr from string [%s]: %s", path, err) + } + err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &available) + if err != nil { + return "", fmt.Errorf("cannot get disk free space for [%s]: %s", path, err) + } + return fmt.Sprintf("%d", available), nil +} diff --git a/storage/driver/filesystem/driver.go b/storage/driver/filesystem/driver.go index e2a51b3..613a9dc 100644 --- a/storage/driver/filesystem/driver.go +++ b/storage/driver/filesystem/driver.go @@ -9,38 +9,27 @@ import ( "os" "path" "path/filepath" - "time" + "strings" - "github.com/kuoss/lethe/storage" storagedriver "github.com/kuoss/lethe/storage/driver" - "github.com/kuoss/lethe/storage/driver/factory" ) const ( - driverName = "filesystem" - defaultRootDirectory = "/tmp/log" + driverName = "filesystem" ) -type DriverParameters struct { +type Params struct { RootDirectory string } -func init() { - factory.Register(driverName, &filesystemDriverFactory{}) -} - type driver struct { rootDirectory string } -func New(params DriverParameters) storagedriver.StorageDriver { - //return &driver{rootDirectory: defaultRootDirectory} - if params.RootDirectory != "" { - return &driver{rootDirectory: 
params.RootDirectory} - } - - return &driver{rootDirectory: defaultRootDirectory} +func New(params Params) storagedriver.Driver { + return &driver{rootDirectory: params.RootDirectory} } + func (d *driver) RootDirectory() string { return d.rootDirectory } @@ -49,12 +38,12 @@ func (d *driver) Name() string { return driverName } -func (d *driver) fullPath(subPath string) string { - return path.Join(d.rootDirectory, subPath) +func (d *driver) fullPath(subpath string) string { + return path.Join(d.rootDirectory, subpath) } -func (d *driver) GetContent(path string) ([]byte, error) { - rc, err := d.Reader(path) +func (d *driver) GetContent(subpath string) ([]byte, error) { + rc, err := d.Reader(subpath) if err != nil { return nil, err } @@ -68,41 +57,44 @@ func (d *driver) GetContent(path string) ([]byte, error) { return p, nil } -func (d *driver) PutContent(subPath string, content []byte) error { - writer, err := d.Writer(subPath) +func (d *driver) PutContent(subpath string, content []byte) error { + writer, err := d.Writer(subpath) if err != nil { return err } defer writer.Close() _, err = io.Copy(writer, bytes.NewReader(content)) if err != nil { - _ = writer.Cancel() - return err + err2 := writer.Cancel() + if err2 != nil { + return fmt.Errorf("copy err: %w, cancel err: %w", err, err2) + } + return fmt.Errorf("copy err: %w", err) + } + err = writer.Commit() + if err != nil { + return fmt.Errorf("commit err: %w", err) } - return writer.Commit() + return nil } -func (d *driver) Reader(path string) (io.ReadCloser, error) { - file, err := os.OpenFile(path, os.O_RDONLY, 0644) +func (d *driver) Reader(subpath string) (io.ReadCloser, error) { + file, err := os.OpenFile(d.fullPath(subpath), os.O_RDONLY, 0644) if err != nil { - if os.IsNotExist(err) { - return nil, storage.PathNotFoundError{Path: path} - } - return nil, err + return nil, storagedriver.PathNotFoundError{Path: subpath, Err: fmt.Errorf("openFile err: %w", err)} } - //TODO seek - + // TODO: seek return file, nil } 
-func (d *driver) Writer(subPath string) (storagedriver.FileWriter, error) { - fullPath := d.fullPath(subPath) - parentDir := path.Dir(fullPath) - if err := os.MkdirAll(parentDir, 0777); err != nil { +func (d *driver) Writer(subpath string) (storagedriver.FileWriter, error) { + fullpath := d.fullPath(subpath) + dir := path.Dir(fullpath) + if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } - fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) + fp, err := os.OpenFile(fullpath, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { return nil, err } @@ -113,255 +105,114 @@ func (d *driver) Writer(subPath string) (storagedriver.FileWriter, error) { return writer, nil } -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { - fi, err := os.Stat(path) +func (d *driver) Stat(subpath string) (storagedriver.FileInfo, error) { + fullpath := d.fullPath(subpath) + fi, err := os.Stat(fullpath) if err != nil { - if os.IsNotExist(err) { - return nil, storage.PathNotFoundError{Path: path} - } - return nil, err + return nil, storagedriver.PathNotFoundError{Path: subpath, Err: fmt.Errorf("stat err: %w", err)} } - - return fileInfo{ - path: path, - FileInfo: fi, + return FileInfo{ + fullpath: fullpath, + osFileInfo: fi, }, nil } -func (d *driver) List(path string) ([]string, error) { - - dir, err := os.Open(path) +func (d *driver) List(subpath string) ([]string, error) { + fullpath := d.fullPath(subpath) + dir, err := os.Open(fullpath) if err != nil { - if os.IsNotExist(err) { - return nil, storage.PathNotFoundError{Path: path} - } - return nil, err + return nil, storagedriver.PathNotFoundError{Path: subpath, Err: fmt.Errorf("open err: %w", err)} } defer dir.Close() fileNames, err := dir.Readdirnames(0) if err != nil { - return nil, err + return nil, fmt.Errorf("readdirnames err: %w", err) } keys := make([]string, 0, len(fileNames)) for _, fileName := range fileNames { - keys = append(keys, filepath.Join(path, fileName)) + keys = append(keys, 
filepath.Join(subpath, fileName)) } return keys, nil } -func (d *driver) Move(sourcePath, destPath string) error { +func (d *driver) Move(sourcePath, targetPath string) error { + if len(strings.Split(sourcePath, string(os.PathSeparator))) < 1 { + return fmt.Errorf("moving 0-1 depth directory is not allowed") + } source := d.fullPath(sourcePath) - dest := d.fullPath(destPath) + target := d.fullPath(targetPath) - if _, err := os.Stat(source); os.IsNotExist(err) { - return storage.PathNotFoundError{Path: sourcePath} + if _, err := os.Stat(source); err != nil { + return storagedriver.PathNotFoundError{Path: sourcePath, Err: fmt.Errorf("stat err: %w", err)} } - if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { - return err + if err := os.MkdirAll(path.Dir(target), 0755); err != nil { + return fmt.Errorf("mkdirAll err: %w", err) } // TODO check windows //Rename replaces it - err := os.Rename(source, dest) + err := os.Rename(source, target) return err } -func (d *driver) Delete(path string) error { - _, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { - return err - } else if err != nil { - return storage.PathNotFoundError{Path: path} +func (d *driver) Delete(subpath string) error { + if len(strings.Split(subpath, string(os.PathSeparator))) < 2 { + return fmt.Errorf("deleting 0-1 depth directory is not allowed") } - err = os.RemoveAll(path) - return err + fullpath := d.fullPath(subpath) + _, err := os.Stat(fullpath) + if err != nil { + return storagedriver.PathNotFoundError{Path: subpath, Err: fmt.Errorf("stat err: %w", err)} + } + err = os.RemoveAll(fullpath) + if err != nil { + return fmt.Errorf("removeAll err: %w", err) + } + return nil } // return only files -func (d *driver) Walk(from string) ([]storagedriver.FileInfo, error) { - - var infos []storagedriver.FileInfo - - err := filepath.Walk(from, func(path string, info os.FileInfo, err error) error { +func (d *driver) Walk(subpath string) ([]storagedriver.FileInfo, error) { + fullpath := 
d.fullPath(subpath) + infos := []storagedriver.FileInfo{} + err := filepath.Walk(fullpath, func(path string, info os.FileInfo, err error) error { if err != nil { - fmt.Printf("prevent panic by handling failure accessing a path %q: %v\n", path, err) - return err + return fmt.Errorf("walkFunc err: %w", err) } if !info.IsDir() { - infos = append(infos, fileInfo{ - FileInfo: info, - path: path, - }) + infos = append(infos, FileInfo{info, path}) } return nil }) - if err != nil { - fmt.Printf("error walking the path %q: %v\n", from, err) - return []storagedriver.FileInfo{}, err + return infos, fmt.Errorf("walk err: %w", err) } return infos, nil } // WalkDir method return only directories -func (d *driver) WalkDir(from string) ([]string, error) { +func (d *driver) WalkDir(subpath string) ([]string, error) { + fullpath := d.fullPath(subpath) dirs := []string{} - - err := filepath.WalkDir(from, func(path string, dir fs.DirEntry, err error) error { - + err := filepath.WalkDir(fullpath, func(path string, dir fs.DirEntry, err error) error { if err != nil { - fmt.Printf("prevent panic by handling failure accessing a path %q: %v\n", path, err) - return err + return fmt.Errorf("walkDirFunc err: %w", err) } - //fmt.Printf("visited logs or dir: %q\n", path) - if dir.IsDir() { - rel, _ := filepath.Rel(from, path) - if err != nil { - return err - } - dirs = append(dirs, rel) + if !dir.IsDir() { + return nil } - return nil - }) - if err != nil { - fmt.Printf("error walking the path %q: %v\n", from, err) - return dirs, err - } - return dirs, nil -} - -//todo if listing every file for acquire target info makes performance issue, the consider interface only directories, not file -/* -func (d *driver) WalkDirWithDepth(from string, depth int) ([]string, error) { - dirs := []string{} - - err := filepath.WalkDir(from, func(path string, d fs.DirEntry, err error) error { - + subpath, err := filepath.Rel(fullpath, path) if err != nil { - fmt.Printf("prevent panic by handling failure accessing a 
path %q: %v\n", path, err) - return err - } - rel, _ := filepath.Rel(from, path) - if strings.Count(rel, string(os.PathSeparator)) > depth { - return filepath.SkipDir + return fmt.Errorf("rel err: %w", err) } - dirs = append(dirs, path) + dirs = append(dirs, subpath) return nil }) if err != nil { - fmt.Printf("error walking the path %q: %v\n", from, err) - return dirs, err + return dirs, fmt.Errorf("walkdir err: %w", err) } return dirs, nil } - -*/ - -// For object-storage backend -type fileWriter struct { - file *os.File - size int64 - bw *bufio.Writer - closed bool - committed bool - cancelled bool -} - -func (fw *fileWriter) Write(p []byte) (int, error) { - if fw.closed { - return 0, fmt.Errorf("already closed") - } else if fw.committed { - return 0, fmt.Errorf("already committed") - } else if fw.cancelled { - return 0, fmt.Errorf("already cancelled") - } - n, err := fw.bw.Write(p) - fw.size += int64(n) - return n, err -} - -func (fw *fileWriter) Size() int64 { - return fw.size -} - -func (fw *fileWriter) Close() error { - if fw.closed { - return fmt.Errorf("already closed") - } - - if err := fw.bw.Flush(); err != nil { - return err - } - - if err := fw.file.Sync(); err != nil { - return err - } - - if err := fw.file.Close(); err != nil { - return err - } - fw.closed = true - return nil -} - -func (fw *fileWriter) Cancel() error { - if fw.closed { - return fmt.Errorf("already closed") - } - - fw.cancelled = true - fw.file.Close() - return os.Remove(fw.file.Name()) -} - -func (fw *fileWriter) Commit() error { - if fw.closed { - return fmt.Errorf("already closed") - } else if fw.committed { - return fmt.Errorf("already committed") - } else if fw.cancelled { - return fmt.Errorf("already cancelled") - } - - if err := fw.bw.Flush(); err != nil { - return err - } - - if err := fw.file.Sync(); err != nil { - return err - } - - fw.committed = true - return nil -} - -// for compile -var _ storagedriver.FileInfo = fileInfo{} - -type fileInfo struct { - os.FileInfo - path 
string -} - -func (fi fileInfo) Path() string { - return fi.path -} - -func (fi fileInfo) Size() int64 { - if fi.IsDir() { - return 0 - } - - return fi.FileInfo.Size() -} - -func (fi fileInfo) ModTime() time.Time { - return fi.FileInfo.ModTime() -} - -func (fi fileInfo) IsDir() bool { - return fi.FileInfo.IsDir() -} diff --git a/storage/driver/filesystem/driver_test.go b/storage/driver/filesystem/driver_test.go index 574fb3d..cdddfa5 100644 --- a/storage/driver/filesystem/driver_test.go +++ b/storage/driver/filesystem/driver_test.go @@ -2,16 +2,523 @@ package filesystem import ( "fmt" + "sort" "testing" + + storagedriver "github.com/kuoss/lethe/storage/driver" + "github.com/kuoss/lethe/util/testutil" + "github.com/stretchr/testify/assert" +) + +var ( + driver1 storagedriver.Driver + logDataPath_driver = "tmp/storage_driver_filesystem_driver_test" ) +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + driver1 = New(Params{RootDirectory: logDataPath_driver}) +} + +func TestNew(t *testing.T) { + testCases := []struct { + params Params + want *driver + }{ + { + Params{}, + &driver{rootDirectory: ""}, + }, + { + Params{RootDirectory: "asdf"}, + &driver{rootDirectory: "asdf"}, + }, + { + Params{RootDirectory: "/data/log"}, + &driver{rootDirectory: "/data/log"}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got := New(tc.params) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestRootDirectory(t *testing.T) { + got := driver1.RootDirectory() + assert.Equal(t, logDataPath_driver, got) +} + +func TestName(t *testing.T) { + got := driver1.Name() + assert.Equal(t, "filesystem", got) +} + +func TestGetContent(t *testing.T) { + testCases := []struct { + path string + want string + wantError string + }{ + { + "", + "", + "read tmp/storage_driver_filesystem_driver_test: is a directory", + }, + { + "node", + "", + "read tmp/storage_driver_filesystem_driver_test/node: is a directory", + }, + { + 
"pod/namespace01/2009-11-10_21.log", + "2009-11-10T21:00:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\n2009-11-10T21:01:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\n2009-11-10T21:02:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\n", + "", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got, err := driver1.GetContent(tc.path) + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + assert.Equal(t, tc.want, string(got)) + }) + } +} + +func TestPutContent(t *testing.T) { + testCases := []struct { + path string + content string + wantError string + wantError2 string + want string + }{ + { + "", "", + "open tmp/storage_driver_filesystem_driver_test: is a directory", + "", + "", + }, + { + "node", "", + "open tmp/storage_driver_filesystem_driver_test/node: is a directory", + "", + "", + }, + { + "pod/namespace01/test1.log", "hello", + "", + "", + "hello", + }, + { + "pod/namespace01/2009-11-10_21.log", "hello", + "", + "", + "hello11-10T21:00:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\n2009-11-10T21:01:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\n2009-11-10T21:02:00.000000Z[namespace01|nginx-deployment-75675f5897-7ci7o|nginx] hello world\n", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + err := driver1.PutContent(tc.path, ([]byte)(tc.content)) + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + return + } + got, err := driver1.GetContent(tc.path) + if tc.wantError2 == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError2) + return + } + assert.Equal(t, tc.want, string(got)) + }) + } + testutil.ResetLogData() +} + +func TestReader(t *testing.T) { + testCases := []struct { + path string + 
wantError string + }{ + { + "", + "", + }, + { + "node", + "", + }, + { + "pod/namespace01/2009-11-10_21.log", + "", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got, err := driver1.Reader(tc.path) + if tc.wantError == "" { + assert.NoError(t, err) + assert.NotEmpty(t, got) + } else { + assert.EqualError(t, err, tc.wantError) + assert.Nil(t, got) + } + }) + } +} + +func TestWriter(t *testing.T) { + testCases := []struct { + path string + wantError string + }{ + { + "", + "open tmp/storage_driver_filesystem_driver_test: is a directory", + }, + { + "hello", + "", + }, + { + "node", + "open tmp/storage_driver_filesystem_driver_test/node: is a directory", + }, + { + "pod/namespace01/2009-11-10_21.log", + "", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got, err := driver1.Writer(tc.path) + if tc.wantError == "" { + assert.NoError(t, err) + assert.NotEmpty(t, got) + } else { + assert.EqualError(t, err, tc.wantError) + assert.Nil(t, got) + } + }) + } + testutil.ResetLogData() +} + +func TestStat(t *testing.T) { + testCases := []struct { + path string + want string // fullpath + wantError string + }{ + { + "", + "tmp/storage_driver_filesystem_driver_test", + "", + }, + { + "hello", + "", + "Path not found: hello", + }, + { + "node", + "tmp/storage_driver_filesystem_driver_test/node", + "", + }, + { + "pod", + "tmp/storage_driver_filesystem_driver_test/pod", + "", + }, + { + "pod/namespace01", + "tmp/storage_driver_filesystem_driver_test/pod/namespace01", + "", + }, + { + "pod/namespace01/hello.log", + "", + "Path not found: pod/namespace01/hello.log", + }, + { + "pod/namespace01/2009-11-10_21.log", + "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2009-11-10_21.log", + "", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + fi, err := driver1.Stat(tc.path) + if tc.wantError != "" { + assert.EqualError(t, err, 
tc.wantError) + return + } + assert.NoError(t, err) + got := fi.Fullpath() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestList(t *testing.T) { + testCases := []struct { + path string + want []string + wantError string + }{ + { + "", + []string{"node", "pod"}, + "", + }, + { + "node", + []string{"node/node01", "node/node02"}, + "", + }, + { + "hello", + nil, + "Path not found: hello", + }, + { + "pod", + []string{"pod/namespace01", "pod/namespace02"}, + "", + }, + { + "pod/namespace01", + []string{"pod/namespace01/2000-01-01_00.log", "pod/namespace01/2009-11-10_21.log", "pod/namespace01/2009-11-10_22.log", "pod/namespace01/2029-11-10_23.log"}, + "", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got, err := driver1.List(tc.path) + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + sort.Strings(got) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestMove(t *testing.T) { + testCases := []struct { + a string + b string + wantError string + }{ + { + "", "", + "rename tmp/storage_driver_filesystem_driver_test tmp/storage_driver_filesystem_driver_test: file exists", + }, + { + "hello", "", + "Path not found: hello", + }, + { + "", "hello", + "rename tmp/storage_driver_filesystem_driver_test tmp/storage_driver_filesystem_driver_test/hello: invalid argument", + }, + { + "hello", "hello", + "Path not found: hello", + }, + { + "pod/namespace01/hello.log", "pod/namespace01/hello.log", + "Path not found: pod/namespace01/hello.log", + }, + { + "pod/namespace01/2009-11-10_21.log", "pod/namespace01/2009-11-10_00.log", // move + "", + }, + { + "pod/namespace01/2009-11-10_21.log", "pod/namespace01/2009-11-10_00.log", // duplicate + "Path not found: pod/namespace01/2009-11-10_21.log", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + err := driver1.Move(tc.a, tc.b) + if tc.wantError == "" { + assert.NoError(t, err) + } 
else { + assert.EqualError(t, err, tc.wantError) + } + }) + } + testutil.ResetLogData() +} + +func TestDelete(t *testing.T) { + testCases := []struct { + path string + wantError string + }{ + { + "", + "deleting 0-1 depth directory is not allowed", + }, + { + "node", + "deleting 0-1 depth directory is not allowed", + }, + { + "pod/namespace02", + "", + }, + { + "pod/namespace01/2009-11-10_21.log", // delete + "", + }, + { + "pod/namespace01/2009-11-10_21.log", // duplicate + "Path not found: pod/namespace01/2009-11-10_21.log", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + err := driver1.Delete(tc.path) + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + }) + } + testutil.ResetLogData() +} + func TestWalk(t *testing.T) { - d := New(DriverParameters{RootDirectory: "THIS_WILL_OVERRIDE_BY_DRIVER_CODE"}) - infos, err := d.WalkDir("./tmp/log") - if err != nil { - t.Fatalf("err from walk") + testCases := []struct { + path string + want []string + wantError string + }{ + // error + { + "hello", + nil, + "walk err: walkFunc err: lstat tmp/storage_driver_filesystem_driver_test/hello: no such file or directory", + }, + { + "tmp/storage_driver_filesystem_driver_test", + nil, + "walk err: walkFunc err: lstat tmp/storage_driver_filesystem_driver_test/tmp/storage_driver_filesystem_driver_test: no such file or directory", + }, + // ok + { + "", + []string{"tmp/storage_driver_filesystem_driver_test/node/node01/2009-11-10_21.log", "tmp/storage_driver_filesystem_driver_test/node/node01/2009-11-10_22.log", "tmp/storage_driver_filesystem_driver_test/node/node02/2009-11-01_00.log", "tmp/storage_driver_filesystem_driver_test/node/node02/2009-11-10_21.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2000-01-01_00.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2009-11-10_21.log", 
"tmp/storage_driver_filesystem_driver_test/pod/namespace01/2009-11-10_22.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2029-11-10_23.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace02/0000-00-00_00.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace02/2009-11-10_22.log"}, + "", + }, + { + "node", + []string{"tmp/storage_driver_filesystem_driver_test/node/node01/2009-11-10_21.log", "tmp/storage_driver_filesystem_driver_test/node/node01/2009-11-10_22.log", "tmp/storage_driver_filesystem_driver_test/node/node02/2009-11-01_00.log", "tmp/storage_driver_filesystem_driver_test/node/node02/2009-11-10_21.log"}, + "", + }, + { + "pod", + []string{"tmp/storage_driver_filesystem_driver_test/pod/namespace01/2000-01-01_00.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2009-11-10_21.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2009-11-10_22.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2029-11-10_23.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace02/0000-00-00_00.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace02/2009-11-10_22.log"}, + "", + }, + { + "pod/namespace01", + []string{"tmp/storage_driver_filesystem_driver_test/pod/namespace01/2000-01-01_00.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2009-11-10_21.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2009-11-10_22.log", "tmp/storage_driver_filesystem_driver_test/pod/namespace01/2029-11-10_23.log"}, + "", + }, + { + "pod/namespace01/2009-11-10_21.log", + []string{"tmp/storage_driver_filesystem_driver_test/pod/namespace01/2009-11-10_21.log"}, + "", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + infos, err := driver1.Walk(tc.path) + if tc.wantError == "" { + assert.NoError(t, err) + paths := []string{} + for _, info := range infos { + paths = append(paths, info.Fullpath()) + } + assert.Equal(t, tc.want, paths) + } 
else { + assert.EqualError(t, err, tc.wantError) + } + }) + } +} + +func TestWalkDir(t *testing.T) { + testCases := []struct { + path string + want []string + wantError string + }{ + // error + { + "hello", + nil, + "walkdir err: walkDirFunc err: lstat tmp/storage_driver_filesystem_driver_test/hello: no such file or directory", + }, + { + "tmp/storage_driver_filesystem_driver_test", + nil, + "walkdir err: walkDirFunc err: lstat tmp/storage_driver_filesystem_driver_test/tmp/storage_driver_filesystem_driver_test: no such file or directory", + }, + // ok + { + "", + []string{".", "node", "node/node01", "node/node02", "pod", "pod/namespace01", "pod/namespace02"}, + "", + }, + { + "node", + []string{".", "node01", "node02"}, + "", + }, + { + "pod", + []string{".", "namespace01", "namespace02"}, + "", + }, + { + "pod/namespace01", + []string{"."}, + "", + }, + { + "pod/namespace01/2009-11-10_21.log", + []string{}, + "", + }, } - for _, info := range infos { - fmt.Println(info) + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got, err := driver1.WalkDir(tc.path) + if tc.wantError == "" { + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + } else { + assert.EqualError(t, err, tc.wantError) + } + }) } } diff --git a/storage/driver/filesystem/factory.go b/storage/driver/filesystem/factory.go index 78955b1..b3d4cf9 100644 --- a/storage/driver/filesystem/factory.go +++ b/storage/driver/filesystem/factory.go @@ -1,9 +1,19 @@ package filesystem -import storagedriver "github.com/kuoss/lethe/storage/driver" +import ( + storagedriver "github.com/kuoss/lethe/storage/driver" + "github.com/kuoss/lethe/storage/driver/factory" +) type filesystemDriverFactory struct{} -func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return New(DriverParameters{RootDirectory: parameters["RootDirectory"].(string)}), nil +func (factory *filesystemDriverFactory) Create(params 
map[string]interface{}) (storagedriver.Driver, error) { + return New(Params{RootDirectory: params["RootDirectory"].(string)}), nil +} + +func init() { + err := factory.Register(driverName, &filesystemDriverFactory{}) + if err != nil { + panic(err) + } } diff --git a/storage/driver/filesystem/factory_test.go b/storage/driver/filesystem/factory_test.go new file mode 100644 index 0000000..2972ea4 --- /dev/null +++ b/storage/driver/filesystem/factory_test.go @@ -0,0 +1,15 @@ +package filesystem + +import ( + "testing" + + "github.com/kuoss/lethe/storage/driver/factory" + "github.com/stretchr/testify/assert" +) + +func TestCreate(t *testing.T) { + want := New(Params{RootDirectory: "asdf"}) + got, err := factory.Get("filesystem", map[string]interface{}{"RootDirectory": "asdf"}) + assert.NoError(t, err) + assert.Equal(t, want, got) +} diff --git a/storage/driver/filesystem/fileinfo.go b/storage/driver/filesystem/fileinfo.go new file mode 100644 index 0000000..0a5b074 --- /dev/null +++ b/storage/driver/filesystem/fileinfo.go @@ -0,0 +1,33 @@ +package filesystem + +import ( + "os" + "time" + // "github.com/kuoss/lethe/storagedriver/types" +) + +// var _ types.FileInfo = FileInfo{} + +type FileInfo struct { + osFileInfo os.FileInfo + fullpath string +} + +func (i FileInfo) Fullpath() string { + return i.fullpath +} + +func (i FileInfo) Size() int64 { + if i.IsDir() { + return 0 + } + return i.osFileInfo.Size() +} + +func (i FileInfo) ModTime() time.Time { + return i.osFileInfo.ModTime() +} + +func (fi FileInfo) IsDir() bool { + return fi.osFileInfo.IsDir() +} diff --git a/storage/driver/filesystem/fileinfo_test.go b/storage/driver/filesystem/fileinfo_test.go new file mode 100644 index 0000000..697ff9e --- /dev/null +++ b/storage/driver/filesystem/fileinfo_test.go @@ -0,0 +1,57 @@ +package filesystem + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFileInfo(t *testing.T) { + testCases := []struct { + fullpath string + wantError 
string + wantFullpath string + wantSize int64 + wantModTimeYear int + wantIsDir bool + }{ + { + "", "stat : no such file or directory", + "", 0, 2023, false, + }, + { + "hello", "stat hello: no such file or directory", + "", 0, 2023, false, + }, + { + "tmp/init/pod", "", + "tmp/init/pod", 0, 2023, true, + }, + { + "tmp/init/pod/namespace01", "", + "tmp/init/pod/namespace01", 0, 2023, true, + }, + { + "tmp/init/pod/namespace01/2009-11-10_21.log", "", + "tmp/init/pod/namespace01/2009-11-10_21.log", 279, 2023, false, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + fi, err := os.Stat(tc.fullpath) + if tc.wantError != "" { + assert.EqualError(t, err, tc.wantError) + } else { + assert.NoError(t, err) + + got := FileInfo{fi, tc.fullpath} + assert.Equal(t, tc.wantFullpath, got.Fullpath()) + assert.Equal(t, tc.wantSize, got.Size()) + assert.GreaterOrEqual(t, got.ModTime().Year(), tc.wantModTimeYear) + assert.Equal(t, tc.wantIsDir, got.IsDir()) + } + }) + } +} diff --git a/storage/driver/filesystem/filewriter.go b/storage/driver/filesystem/filewriter.go new file mode 100644 index 0000000..dae1e51 --- /dev/null +++ b/storage/driver/filesystem/filewriter.go @@ -0,0 +1,85 @@ +package filesystem + +import ( + "bufio" + "fmt" + "os" +) + +// For object-storage backend +type fileWriter struct { + file *os.File + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (w *fileWriter) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *fileWriter) Size() int64 { + return w.size +} + +func (w *fileWriter) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if err := 
w.file.Sync(); err != nil { + return err + } + + if err := w.file.Close(); err != nil { + return err + } + w.closed = true + return nil +} + +func (w *fileWriter) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } + + w.cancelled = true + w.file.Close() + return os.Remove(w.file.Name()) +} + +func (w *fileWriter) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if err := w.file.Sync(); err != nil { + return err + } + + w.committed = true + return nil +} diff --git a/storage/driver/filesystem/filewriter_test.go b/storage/driver/filesystem/filewriter_test.go new file mode 100644 index 0000000..8983324 --- /dev/null +++ b/storage/driver/filesystem/filewriter_test.go @@ -0,0 +1,48 @@ +package filesystem + +import ( + "bufio" + "os" + "testing" + + storagedriver "github.com/kuoss/lethe/storage/driver" + "github.com/kuoss/lethe/util/testutil" + "github.com/stretchr/testify/assert" +) + +func init() { + dir := "tmp/storage_driver_filesystem_filewriter_test" + testutil.ChdirRoot() + err := os.RemoveAll(dir) + if err != nil { + panic(err) + } + err = os.Mkdir(dir, 0755) + if err != nil { + panic(err) + } +} + +func TestFileWriter(t *testing.T) { + f, err := os.Create("tmp/storage_driver_filesystem_filewriter_test/greet.txt") + assert.NoError(t, err) + bw := bufio.NewWriter(f) + + var fw storagedriver.FileWriter = &fileWriter{ + file: f, + size: 0, + bw: bw, + closed: false, + committed: false, + cancelled: false, + } + s := "hello" + n, err := fw.Write([]byte(s)) + assert.NoError(t, err) + assert.Equal(t, len(s), n) + fw.Close() + + content, err := os.ReadFile("tmp/storage_driver_filesystem_filewriter_test/greet.txt") + assert.NoError(t, err) + assert.Equal(t, "hello", string(content)) +} diff --git 
a/storage/driver/filesystem/init_test.go b/storage/driver/filesystem/init_test.go index 1aa8050..02f8e8e 100644 --- a/storage/driver/filesystem/init_test.go +++ b/storage/driver/filesystem/init_test.go @@ -1,21 +1,10 @@ package filesystem import ( - "log" - "os" - "path" - "runtime" + "github.com/kuoss/lethe/util/testutil" ) func init() { - changeWorkingDirectoryToProjectRoot() -} - -func changeWorkingDirectoryToProjectRoot() { - _, filename, _, _ := runtime.Caller(0) - dir := path.Join(path.Dir(filename), "../../..") - err := os.Chdir(dir) - if err != nil { - log.Fatalf("Cannot change directory to [%s]", dir) - } + testutil.ChdirRoot() + testutil.ResetLogData() } diff --git a/storage/driver/filesystem/paths_test.go b/storage/driver/filesystem/paths_test.go deleted file mode 100644 index 0b1d980..0000000 --- a/storage/driver/filesystem/paths_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package filesystem - -import ( - "fmt" - "os" - "path/filepath" - "testing" - - "github.com/kuoss/lethe/storage" - "github.com/kuoss/lethe/storage/driver/factory" -) - -func TestDepth(t *testing.T) { - userHomeDirectory, _ := os.UserHomeDir() - d, _ := factory.Get("filesystem", map[string]interface{}{"RootDirectory": filepath.Join(userHomeDirectory, "tmp", "log")}) - logPath := storage.LogPath{RootDirectory: d.RootDirectory()} - logPath.SetFullPath(filepath.Join("pod", "namespace01", "2022-11-10_23.log")) - rtn := logPath.Depth() - fmt.Println(rtn) -} diff --git a/storage/driver/filewriter.go b/storage/driver/filewriter.go new file mode 100644 index 0000000..0817fb1 --- /dev/null +++ b/storage/driver/filewriter.go @@ -0,0 +1,12 @@ +package driver + +import ( + "io" +) + +type FileWriter interface { + io.WriteCloser + Size() int64 + Cancel() error + Commit() error +} diff --git a/storage/paths.go b/storage/driver/logpath.go similarity index 66% rename from storage/paths.go rename to storage/driver/logpath.go index 9124c2e..c58f9f7 100644 --- a/storage/paths.go +++ 
b/storage/driver/logpath.go @@ -1,9 +1,11 @@ -package storage +package driver import ( "os" "path/filepath" "strings" + + "github.com/kuoss/common/logger" ) // /// @@ -24,53 +26,45 @@ import ( // └── namespace02 // └── 2009-11-10_22.log +type Depth int + const ( - TYPE = "TYPE" - TARGET = "TARGET" - FILE = "FILE" - UNKNOWN = "UNKNOWN" + DepthUnknown Depth = iota + DepthType + DepthTarget + DepthFile ) -/* -type pathSpec() interface{ - Stub() -} -*/ - type LogPath struct { - fullPath string + RootDirectory string + Subpath string target string logType string file string - RootDirectory string } -func (l *LogPath) Depth() string { - rel, err := filepath.Rel(l.RootDirectory, l.fullPath) - if err != nil { - return UNKNOWN - } - parts := strings.Split(rel, string(os.PathSeparator)) +func (l *LogPath) Depth() Depth { + parts := strings.Split(l.Subpath, string(os.PathSeparator)) switch len(parts) { case 1: l.logType = parts[0] - return TYPE + return DepthType case 2: l.logType = parts[0] l.target = parts[1] - return TARGET - + return DepthTarget case 3: l.logType = parts[0] l.target = parts[1] l.file = parts[2] - return FILE + return DepthFile } - return UNKNOWN + logger.Warnf("path is too deep. depth: %d, rel: %s", len(parts), l.Subpath) + return DepthUnknown } -func (l *LogPath) FullPath() string { - return l.fullPath +func (l *LogPath) Fullpath() string { + return filepath.Join(l.RootDirectory, l.Subpath) } func (l *LogPath) LogType() string { return l.logType @@ -81,8 +75,3 @@ func (l *LogPath) Target() string { func (l *LogPath) Filename() string { return l.file } - -// just for testing? 
-func (l *LogPath) SetFullPath(subPath string) { - l.fullPath = filepath.Join(l.RootDirectory, subPath) -} diff --git a/storage/driver/logpath_test.go b/storage/driver/logpath_test.go new file mode 100644 index 0000000..89166db --- /dev/null +++ b/storage/driver/logpath_test.go @@ -0,0 +1,96 @@ +package driver + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + logPath1 = LogPath{ + RootDirectory: "/tmp/init", + Subpath: "Subpath", + target: "target", + logType: "logType", + file: "file", + } +) + +func TestGetDepth(t *testing.T) { + testCases := []struct { + rootDirectory string + subpath string + want Depth + wantLogPath LogPath + }{ + { + "", "", + DepthType, LogPath{RootDirectory: "", Subpath: "", target: "", logType: "", file: ""}, + }, + { + "log", "", + DepthType, LogPath{RootDirectory: "log", Subpath: "", target: "", logType: "", file: ""}, + }, + { + "", "log", + DepthType, LogPath{RootDirectory: "", Subpath: "log", target: "", logType: "log", file: ""}, + }, + { + "tmp/init", "", + DepthType, LogPath{RootDirectory: "tmp/init", Subpath: "", target: "", logType: "", file: ""}, + }, + { + "tmp/init", "node", + DepthType, LogPath{RootDirectory: "tmp/init", Subpath: "node", target: "", logType: "node", file: ""}, + }, + { + "tmp/init", "hello", + DepthType, LogPath{RootDirectory: "tmp/init", Subpath: "hello", target: "", logType: "hello", file: ""}, + }, + { + "tmp/init", "pod", + DepthType, LogPath{RootDirectory: "tmp/init", Subpath: "pod", target: "", logType: "pod", file: ""}, + }, + { + "tmp/init", "pod/ns1", + DepthTarget, LogPath{RootDirectory: "tmp/init", Subpath: "pod/ns1", target: "ns1", logType: "pod", file: ""}, + }, + { + "tmp/init", "pod/ns1/2022", + DepthFile, LogPath{RootDirectory: "tmp/init", Subpath: "pod/ns1/2022", target: "ns1", logType: "pod", file: "2022"}, + }, + // error + { + "/a", "./pod/ns1/pod1", + DepthUnknown, LogPath{RootDirectory: "/a", Subpath: "./pod/ns1/pod1", target: "", logType: "", file: 
""}, + }, + { + "log", "pod/ns1/pod1/asdf", + DepthUnknown, LogPath{RootDirectory: "log", Subpath: "pod/ns1/pod1/asdf", target: "", logType: "", file: ""}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d %s %s", i, tc.rootDirectory, tc.subpath), func(t *testing.T) { + logPath := LogPath{RootDirectory: tc.rootDirectory, Subpath: tc.subpath} + got := logPath.Depth() + assert.Equal(t, tc.want, got) + assert.Equal(t, tc.wantLogPath, logPath) + }) + } +} + +func TestLogType(t *testing.T) { + got := logPath1.LogType() + assert.Equal(t, "logType", got) +} + +func TestTarget(t *testing.T) { + got := logPath1.Target() + assert.Equal(t, "target", got) +} + +func TestFilename(t *testing.T) { + got := logPath1.Filename() + assert.Equal(t, "file", got) +} diff --git a/storage/fileservice/clean.go b/storage/fileservice/clean.go new file mode 100644 index 0000000..a461a53 --- /dev/null +++ b/storage/fileservice/clean.go @@ -0,0 +1,33 @@ +package fileservice + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/kuoss/common/logger" +) + +func (s *FileService) Clean() { + s.removeFilesWithPrefix("host") + s.removeFilesWithPrefix("kube") +} + +func (s *FileService) removeFilesWithPrefix(prefix string) { + files, err := filepath.Glob(fmt.Sprintf("%s/%s.*", s.config.LogDataPath(), prefix)) + if err != nil { + logger.Warnf("glob err: %s, prefix: %s", err.Error(), prefix) + return + } + if len(files) < 1 { + return + } + logger.Warnf("cleansing files prefix: %s", prefix) + for _, file := range files { + logger.Infof("remove file: %s", file) + err := os.Remove(file) + if err != nil { + logger.Warnf("remove err: %s, file: %s", err.Error(), file) + } + } +} diff --git a/storage/fileservice/clean_test.go b/storage/fileservice/clean_test.go new file mode 100644 index 0000000..15c46f2 --- /dev/null +++ b/storage/fileservice/clean_test.go @@ -0,0 +1 @@ +package fileservice diff --git a/storage/fileservice/delete.go b/storage/fileservice/delete.go new file mode 100644 
index 0000000..d6cc2b8 --- /dev/null +++ b/storage/fileservice/delete.go @@ -0,0 +1,104 @@ +package fileservice + +import ( + "fmt" + "sort" + "strings" + + "github.com/kuoss/common/logger" + "github.com/kuoss/lethe/clock" + "golang.org/x/sys/unix" +) + +func (s *FileService) DeleteByAge() error { + retentionTime := s.config.RetentionTime() + if retentionTime == 0 { + logger.Infof("retentionTime is 0 (DeleteByAge skipped)") + return nil + } + point := strings.Replace(clock.Now().Add(-retentionTime).UTC().String()[0:13], " ", "_", 1) + files, err := s.ListFiles() + if err != nil { + return fmt.Errorf("listFiles err: %w", err) + } + if len(files) < 1 { + logger.Infof("DeleteByAge( < %s): no files. done.", point) + return nil + } + sort.Slice(files, func(i, j int) bool { + return files[i].Name < files[j].Name + }) + + for _, file := range files { + if file.Name < point { + logger.Infof("DeleteByAge(%s < %s): %s", file.Name, point, file.Fullpath) + err := s.driver.Delete(file.Subpath) + if err != nil { + logger.Errorf("delete err: %s", err.Error()) + continue + } + } + } + logger.Infof("DeleteByAge(%s): DONE", point) + return nil +} + +func (s *FileService) DeleteBySize() error { + retentionSize := s.config.RetentionSize() + if retentionSize == 0 { + logger.Infof("retentionSize is 0 (DeleteBySize skipped)") + return nil + } + files, err := s.ListFiles() + if err != nil { + return fmt.Errorf("listFiles err: %w", err) + } + sort.Slice(files, func(i, j int) bool { + return files[i].Name < files[j].Name + }) + + for _, file := range files { + usedBytes, err := s.GetUsedBytes(".") + if err != nil { + return fmt.Errorf("getUsedBytes err: %w", err) + } + if usedBytes < retentionSize { + logger.Infof("DeleteBySize(%d < %d): DONE", usedBytes, retentionSize) + return nil + } + logger.Infof("DeleteBySize(%d > %d): %s", usedBytes, retentionSize, file.Fullpath) + err = s.driver.Delete(file.Subpath) + if err != nil { + logger.Errorf("delete err: %s", err.Error()) + } + } + return 
nil +} + +func (s *FileService) GetUsedBytes(subpath string) (int, error) { + if s.config.RetentionSizingStrategy() == "disk" { + return s.GetUsedBytesFromDisk(subpath) + } + return s.GetUsedBytesFromFiles(subpath) +} + +func (s *FileService) GetUsedBytesFromFiles(subpath string) (int, error) { + fileInfos, err := s.driver.Walk(subpath) + if err != nil { + return 0, err + } + var size int + for _, fileInfo := range fileInfos { + size += int(fileInfo.Size()) + } + return size, err +} + +func (s *FileService) GetUsedBytesFromDisk(path string) (int, error) { + var stat unix.Statfs_t + err := unix.Statfs(path, &stat) + if err != nil { + return 0, err + } + return int((stat.Blocks - stat.Bavail) * uint64(stat.Bsize)), nil +} diff --git a/storage/fileservice/delete_test.go b/storage/fileservice/delete_test.go new file mode 100644 index 0000000..637d8ec --- /dev/null +++ b/storage/fileservice/delete_test.go @@ -0,0 +1,215 @@ +package fileservice + +import ( + "fmt" + "testing" + "time" + + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/config" + "github.com/kuoss/lethe/util/testutil" + "github.com/stretchr/testify/assert" +) + +var ( + fileService2 *FileService +) + +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + clock.SetPlaygroundMode(true) + + cfg, err := config.New("test") + if err != nil { + panic(err) + } + cfg.SetLogDataPath("tmp/storage_fileservice_delete_test") + fileService2, err = New(cfg) + if err != nil { + panic(err) + } +} + +func TestDeleteByAge(t *testing.T) { + testCases := []struct { + retentionTime time.Duration + want []LogFile + }{ + { + 0, // disabled + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target:
"node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-01_00.log", Subpath: "node/node02/2009-11-01_00.log", LogType: "node", Target: "node02", Name: "2009-11-01_00.log", Extension: ".log", Size: 0}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2000-01-01_00.log", Subpath: "pod/namespace01/2000-01-01_00.log", LogType: "pod", Target: "namespace01", Name: "2000-01-01_00.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/0000-00-00_00.log", Subpath: "pod/namespace02/0000-00-00_00.log", LogType: "pod", Target: "namespace02", Name: "0000-00-00_00.log", Extension: ".log", Size: 12}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 1 * time.Second, // 1 second + []LogFile{ + {Fullpath: 
"tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 1 * time.Hour, // 1 hour + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 2 * time.Hour, // 2 hours + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_21.log", Subpath: 
"node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 1 * 24 * time.Hour, // 1 day + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: 
"tmp/storage_fileservice_delete_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 10 * 24 * time.Hour, // 10 days + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-01_00.log", Subpath: "node/node02/2009-11-01_00.log", LogType: "node", Target: "node02", Name: "2009-11-01_00.log", Extension: ".log", Size: 0}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: 
"2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 100 * 365 * 24 * time.Hour, // 100 years + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-01_00.log", Subpath: "node/node02/2009-11-01_00.log", LogType: "node", Target: "node02", Name: "2009-11-01_00.log", Extension: ".log", Size: 0}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2000-01-01_00.log", Subpath: 
"pod/namespace01/2000-01-01_00.log", LogType: "pod", Target: "namespace01", Name: "2000-01-01_00.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d__%s", i, tc.retentionTime), func(t *testing.T) { + fileService2.config.SetRetentionTime(tc.retentionTime) + err := fileService2.DeleteByAge() + assert.NoError(t, err) + + got, err := fileService2.ListFiles() + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + testutil.ResetLogData() + }) + } +} + +func TestDeleteBySize(t *testing.T) { + testCases := []struct { + retentionSize int + want []LogFile + }{ + { + 0, // disabled + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 
177}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-01_00.log", Subpath: "node/node02/2009-11-01_00.log", LogType: "node", Target: "node02", Name: "2009-11-01_00.log", Extension: ".log", Size: 0}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2000-01-01_00.log", Subpath: "pod/namespace01/2000-01-01_00.log", LogType: "pod", Target: "namespace01", Name: "2000-01-01_00.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/0000-00-00_00.log", Subpath: "pod/namespace02/0000-00-00_00.log", LogType: "pod", Target: "namespace02", Name: "0000-00-00_00.log", Extension: ".log", Size: 12}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 10, // 10 bytes + []LogFile{}, + }, + { + 1 * 1024, // 1 KiB + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: 
"pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}}, + }, + { + 2 * 1024, // 2 KiB + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 3 * 1024, // 3 KiB + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 1 * 1024 * 1024, // 1 MiB + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: 
"node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-01_00.log", Subpath: "node/node02/2009-11-01_00.log", LogType: "node", Target: "node02", Name: "2009-11-01_00.log", Extension: ".log", Size: 0}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2000-01-01_00.log", Subpath: "pod/namespace01/2000-01-01_00.log", LogType: "pod", Target: "namespace01", Name: "2000-01-01_00.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/0000-00-00_00.log", Subpath: "pod/namespace02/0000-00-00_00.log", LogType: "pod", Target: "namespace02", Name: "0000-00-00_00.log", Extension: ".log", Size: 12}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: 
"pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + { + 999999999 * 1024 * 1024 * 1024, // 999,999,999 GiB + []LogFile{ + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_21.log", Subpath: "node/node01/2009-11-10_21.log", LogType: "node", Target: "node01", Name: "2009-11-10_21.log", Extension: ".log", Size: 1057}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node01/2009-11-10_22.log", Subpath: "node/node01/2009-11-10_22.log", LogType: "node", Target: "node01", Name: "2009-11-10_22.log", Extension: ".log", Size: 177}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-01_00.log", Subpath: "node/node02/2009-11-01_00.log", LogType: "node", Target: "node02", Name: "2009-11-01_00.log", Extension: ".log", Size: 0}, + {Fullpath: "tmp/storage_fileservice_delete_test/node/node02/2009-11-10_21.log", Subpath: "node/node02/2009-11-10_21.log", LogType: "node", Target: "node02", Name: "2009-11-10_21.log", Extension: ".log", Size: 1116}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2000-01-01_00.log", Subpath: "pod/namespace01/2000-01-01_00.log", LogType: "pod", Target: "namespace01", Name: "2000-01-01_00.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_21.log", Subpath: "pod/namespace01/2009-11-10_21.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_21.log", Extension: ".log", Size: 279}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2009-11-10_22.log", Subpath: "pod/namespace01/2009-11-10_22.log", LogType: "pod", Target: "namespace01", Name: "2009-11-10_22.log", Extension: ".log", Size: 1031}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace01/2029-11-10_23.log", Subpath: "pod/namespace01/2029-11-10_23.log", LogType: "pod", Target: "namespace01", Name: "2029-11-10_23.log", Extension: ".log", Size: 279}, + 
{Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/0000-00-00_00.log", Subpath: "pod/namespace02/0000-00-00_00.log", LogType: "pod", Target: "namespace02", Name: "0000-00-00_00.log", Extension: ".log", Size: 12}, + {Fullpath: "tmp/storage_fileservice_delete_test/pod/namespace02/2009-11-10_22.log", Subpath: "pod/namespace02/2009-11-10_22.log", LogType: "pod", Target: "namespace02", Name: "2009-11-10_22.log", Extension: ".log", Size: 1125}}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d__%d", i, tc.retentionSize), func(t *testing.T) { + fileService2.config.SetRetentionSize(tc.retentionSize) + err := fileService2.DeleteBySize() + assert.NoError(t, err) + + got, err := fileService2.ListFiles() + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + testutil.ResetLogData() + }) + } +} diff --git a/storage/fileservice/fileservice.go b/storage/fileservice/fileservice.go new file mode 100644 index 0000000..133f146 --- /dev/null +++ b/storage/fileservice/fileservice.go @@ -0,0 +1,26 @@ +package fileservice + +import ( + "fmt" + + "github.com/kuoss/lethe/config" + storagedriver "github.com/kuoss/lethe/storage/driver" + "github.com/kuoss/lethe/storage/driver/factory" +) + +type FileService struct { + config *config.Config + driver storagedriver.Driver +} + +func New(cfg *config.Config) (*FileService, error) { + driver, err := factory.Get("filesystem", map[string]interface{}{"RootDirectory": cfg.LogDataPath()}) + if err != nil { + return nil, fmt.Errorf("factory.Get err: %w", err) + } + return &FileService{cfg, driver}, nil +} + +func (s *FileService) Config() *config.Config { + return s.config +} diff --git a/storage/fileservice/fileservice_test.go b/storage/fileservice/fileservice_test.go new file mode 100644 index 0000000..61e2906 --- /dev/null +++ b/storage/fileservice/fileservice_test.go @@ -0,0 +1,13 @@ +package fileservice + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + 
assert.NotEmpty(t, fileService) + assert.Equal(t, "filesystem", fileService.driver.Name()) + assert.Equal(t, "tmp/init", fileService.driver.RootDirectory()) +} diff --git a/storage/fileservice/init_test.go b/storage/fileservice/init_test.go new file mode 100644 index 0000000..0700554 --- /dev/null +++ b/storage/fileservice/init_test.go @@ -0,0 +1,26 @@ +package fileservice + +import ( + "github.com/kuoss/lethe/config" + _ "github.com/kuoss/lethe/storage/driver/filesystem" + "github.com/kuoss/lethe/util/testutil" +) + +var ( + fileService *FileService +) + +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + + cfg, err := config.New("test") + if err != nil { + panic(err) + } + cfg.SetLogDataPath("tmp/init") + fileService, err = New(cfg) + if err != nil { + panic(err) + } +} diff --git a/storage/fileservice/io.go b/storage/fileservice/io.go new file mode 100644 index 0000000..465abf5 --- /dev/null +++ b/storage/fileservice/io.go @@ -0,0 +1,14 @@ +package fileservice + +import ( + "bufio" + "fmt" +) + +func (s *FileService) Scanner(subpath string) (*bufio.Scanner, error) { + rc, err := s.driver.Reader(subpath) + if err != nil { + return nil, fmt.Errorf("reader err: %w", err) + } + return bufio.NewScanner(rc), nil +} diff --git a/storage/fileservice/list.go b/storage/fileservice/list.go new file mode 100644 index 0000000..111f49e --- /dev/null +++ b/storage/fileservice/list.go @@ -0,0 +1,135 @@ +package fileservice + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/kuoss/common/logger" + storagedriver "github.com/kuoss/lethe/storage/driver" +) + +func fullpath2subpath(rootDir string, fullpath string) string { + subpath, err := filepath.Rel(rootDir, fullpath) + if err != nil { + logger.Warnf("rel err: %s", err.Error()) + return "" + } + return subpath +} + +func (s *FileService) dirSize(subpath string) (int64, error) { + files, err := s.driver.List(subpath) + if err != nil { + return 0, err + } + var size int64 + for _, file := range files { + 
info, err := s.driver.Stat(file) + if err != nil { + return 0, err + } + size += info.Size() + } + return size, err +} + +func (s *FileService) List(subpath string) ([]string, error) { + dirs, err := s.driver.List(subpath) + if err != nil { + return nil, fmt.Errorf("list err: %w", err) + } + return dirs, nil +} + +func (s *FileService) ListLogDirs() []LogDir { + dirs, err := s.driver.WalkDir(".") + if err != nil { + logger.Warnf("walkDir err: %s", err.Error()) + return []LogDir{} + } + + rootDir := s.driver.RootDirectory() + logDirs := []LogDir{} + for _, dir := range dirs { + logPath := storagedriver.LogPath{RootDirectory: rootDir, Subpath: dir} + + if logPath.Depth() == storagedriver.DepthTarget { + logDirs = append(logDirs, LogDir{ + LogType: logPath.LogType(), + Target: logPath.Target(), + Subpath: dir, + Fullpath: logPath.Fullpath(), + }) + } + } + return logDirs +} + +func (s *FileService) ListLogDirsWithSize() []LogDir { + logDirs := s.ListLogDirs() + for i, logDir := range logDirs { + var size int64 + size, err := s.dirSize(logDir.Subpath) + if err != nil { + logger.Warnf("dirSize err: %s, path:%s", err.Error(), logDir.Fullpath) + continue + } + logDirs[i].Size = size + + files, err := os.ReadDir(logDir.Fullpath) + if err != nil { + logger.Warnf("readDir err: %s, path:%s", err.Error(), logDir.Fullpath) + continue + } + fileCount := len(files) + logDirs[i].FileCount = fileCount + if fileCount > 0 { + logDirs[i].FirstFile = files[0].Name() + logDirs[i].LastFile = files[fileCount-1].Name() + } + } + return logDirs +} + +func (s *FileService) ListTargets() []LogDir { + logDirs := s.ListLogDirsWithSize() + for i, logDir := range logDirs { + if logDir.LastFile == "" { + continue + } + b, err := s.driver.GetContent(filepath.Join(logDir.Subpath, logDir.LastFile)) + if err != nil { + logger.Warnf("getContent err: %s", err.Error()) + continue + } + content := string(b) + // TODO: if timestamp ? 
+ logDirs[i].LastForward = content[:19] + "Z" + } + return logDirs +} + +func (s *FileService) ListFiles() ([]LogFile, error) { + fileInfos, err := s.driver.Walk(".") + if err != nil { + return []LogFile{}, fmt.Errorf("walk err: %e", err) + } + logFiles := []LogFile{} + rootDir := s.driver.RootDirectory() + for _, fileInfo := range fileInfos { + logPath := storagedriver.LogPath{RootDirectory: rootDir, Subpath: fullpath2subpath(s.config.LogDataPath(), fileInfo.Fullpath())} + if logPath.Depth() == storagedriver.DepthFile { + logFiles = append(logFiles, LogFile{ + Fullpath: logPath.Fullpath(), + Subpath: logPath.Subpath, + LogType: logPath.LogType(), + Target: logPath.Target(), + Name: logPath.Filename(), + Extension: filepath.Ext(logPath.Filename()), + Size: fileInfo.Size(), + }) + } + } + return logFiles, nil +} diff --git a/storage/fileservice/list_test.go b/storage/fileservice/list_test.go new file mode 100644 index 0000000..2ee7acc --- /dev/null +++ b/storage/fileservice/list_test.go @@ -0,0 +1,146 @@ +package fileservice + +import ( + "fmt" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFullpath2subpath(t *testing.T) { + testCases := []struct { + rootDir string + fullpath string + want string + }{ + {"", "", "."}, + {"hello", "", "../."}, + {"", "hello", "hello"}, + {"tmp/init", "tmp/init/pod", "pod"}, + {"tmp/init", "tmp/init/pod/ns1", "pod/ns1"}, + {"tmp/init", "tmp/init/pod/ns1/2023", "pod/ns1/2023"}, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got := fullpath2subpath(tc.rootDir, tc.fullpath) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestDirSize(t *testing.T) { + testCases := []struct { + path string + want int64 + wantError string + }{ + {"", 0, ""}, + {"hello", 0, "Path not found: hello"}, + {"node", 0, ""}, + {"pod", 0, ""}, + {"node/node01", 1234, ""}, + {"node/node02", 1116, ""}, + {"pod/namespace01", 2620, ""}, + {"pod/namespace02", 1137, ""}, + } + for i, tc := range 
testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got, err := fileService.dirSize(tc.path) + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + assert.Equal(t, tc.want, got) + }) + } +} + +func TestList(t *testing.T) { + testCases := []struct { + subpath string + want []string + wantError string + }{ + { + "", + []string{"node", "pod"}, + "", + }, + { + ".", + []string{"node", "pod"}, + "", + }, + { + "hello", + nil, + "list err: Path not found: hello", + }, + { + "node", + []string{"node/node01", "node/node02"}, + "", + }, + { + "pod", + []string{"pod/namespace01", "pod/namespace02"}, + "", + }, + { + "pod/namespace01", + []string{"pod/namespace01/2000-01-01_00.log", "pod/namespace01/2009-11-10_21.log", "pod/namespace01/2009-11-10_22.log", "pod/namespace01/2029-11-10_23.log"}, + "", + }, + { + "pod/namespace01/2029-11-10_23.log", + nil, + "list err: readdirnames err: readdirent tmp/init/pod/namespace01/2029-11-10_23.log: not a directory", + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + got, err := fileService.List(tc.subpath) + if tc.wantError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.wantError) + } + sort.Strings(got) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestListLogDirs(t *testing.T) { + // TODO: if runtime.GOOS == "windows" + want := []LogDir{ + {Fullpath: "tmp/init/node/node01", Subpath: "node/node01", LogType: "node", Target: "node01", FileCount: 0, FirstFile: "", LastFile: "", Size: 0, LastForward: ""}, + {Fullpath: "tmp/init/node/node02", Subpath: "node/node02", LogType: "node", Target: "node02", FileCount: 0, FirstFile: "", LastFile: "", Size: 0, LastForward: ""}, + {Fullpath: "tmp/init/pod/namespace01", Subpath: "pod/namespace01", LogType: "pod", Target: "namespace01", FileCount: 0, FirstFile: "", LastFile: "", Size: 0, LastForward: ""}, + {Fullpath: "tmp/init/pod/namespace02", Subpath: 
"pod/namespace02", LogType: "pod", Target: "namespace02", FileCount: 0, FirstFile: "", LastFile: "", Size: 0, LastForward: ""}} + got := fileService.ListLogDirs() + assert.Equal(t, want, got) +} + +func TestListLogDirsWithSize(t *testing.T) { + want := []LogDir{ + {Fullpath: "tmp/init/node/node01", Subpath: "node/node01", LogType: "node", Target: "node01", FileCount: 2, FirstFile: "2009-11-10_21.log", LastFile: "2009-11-10_22.log", Size: 1234, LastForward: ""}, + {Fullpath: "tmp/init/node/node02", Subpath: "node/node02", LogType: "node", Target: "node02", FileCount: 2, FirstFile: "2009-11-01_00.log", LastFile: "2009-11-10_21.log", Size: 1116, LastForward: ""}, + {Fullpath: "tmp/init/pod/namespace01", Subpath: "pod/namespace01", LogType: "pod", Target: "namespace01", FileCount: 4, FirstFile: "2000-01-01_00.log", LastFile: "2029-11-10_23.log", Size: 2620, LastForward: ""}, + {Fullpath: "tmp/init/pod/namespace02", Subpath: "pod/namespace02", LogType: "pod", Target: "namespace02", FileCount: 2, FirstFile: "0000-00-00_00.log", LastFile: "2009-11-10_22.log", Size: 1137, LastForward: ""}} + got := fileService.ListLogDirsWithSize() + assert.Equal(t, want, got) +} + +func TestListTargets(t *testing.T) { + // TODO: if runtime.GOOS == "windows" + want := []LogDir{ + {Fullpath: "tmp/init/node/node01", Subpath: "node/node01", LogType: "node", Target: "node01", FileCount: 2, FirstFile: "2009-11-10_21.log", LastFile: "2009-11-10_22.log", Size: 1234, LastForward: "2009-11-10T23:00:00Z"}, + {Fullpath: "tmp/init/node/node02", Subpath: "node/node02", LogType: "node", Target: "node02", FileCount: 2, FirstFile: "2009-11-01_00.log", LastFile: "2009-11-10_21.log", Size: 1116, LastForward: "2009-11-10T21:58:00Z"}, + {Fullpath: "tmp/init/pod/namespace01", Subpath: "pod/namespace01", LogType: "pod", Target: "namespace01", FileCount: 4, FirstFile: "2000-01-01_00.log", LastFile: "2029-11-10_23.log", Size: 2620, LastForward: "2009-11-10T23:00:00Z"}, + {Fullpath: "tmp/init/pod/namespace02", 
Subpath: "pod/namespace02", LogType: "pod", Target: "namespace02", FileCount: 2, FirstFile: "0000-00-00_00.log", LastFile: "2009-11-10_22.log", Size: 1137, LastForward: "2009-11-10T22:58:00Z"}} + got := fileService.ListTargets() + assert.Equal(t, want, got) +} diff --git a/storage/fileservice/types.go b/storage/fileservice/types.go new file mode 100644 index 0000000..ac9e93b --- /dev/null +++ b/storage/fileservice/types.go @@ -0,0 +1,24 @@ +package fileservice + +// LogFile +type LogFile struct { + Fullpath string + Subpath string + LogType string + Target string + Name string + Extension string + Size int64 +} + +type LogDir struct { + Fullpath string + Subpath string + LogType string + Target string + FileCount int + FirstFile string + LastFile string + Size int64 + LastForward string +} diff --git a/storage/logservice/init_test.go b/storage/logservice/init_test.go new file mode 100644 index 0000000..15ed4c3 --- /dev/null +++ b/storage/logservice/init_test.go @@ -0,0 +1,29 @@ +package logservice + +import ( + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/config" + "github.com/kuoss/lethe/storage/fileservice" + "github.com/kuoss/lethe/util/testutil" +) + +var ( + logService1 *LogService +) + +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + clock.SetPlaygroundMode(true) + + cfg, err := config.New("test") + if err != nil { + panic(err) + } + cfg.SetLogDataPath("tmp/init") + fileService, err := fileservice.New(cfg) + if err != nil { + panic(err) + } + logService1 = New(fileService) +} diff --git a/storage/logservice/logmodel/log.go b/storage/logservice/logmodel/log.go new file mode 100644 index 0000000..a7bd382 --- /dev/null +++ b/storage/logservice/logmodel/log.go @@ -0,0 +1,32 @@ +package logmodel + +import "fmt" + +type LogType string + +const ( + LogTypeAudit LogType = "audit" + LogTypeNode LogType = "node" + LogTypePod LogType = "pod" +) + +type NodeLog struct { + Time string `json:"time"` + Node string `json:"node"` + Process string 
`json:"process"` + Log string `json:"log"` +} + +func (l NodeLog) Type() LogType { return LogTypeNode } +func (l NodeLog) String() string { return fmt.Sprintf("%#v", l) } + +type PodLog struct { + Time string `json:"time"` + Namespace string `json:"namespace"` + Pod string `json:"pod"` + Container string `json:"container"` + Log string `json:"log"` +} + +func (l PodLog) Type() LogType { return LogTypePod } +func (l PodLog) String() string { return fmt.Sprintf("%#v", l) } diff --git a/storage/logservice/logservice.go b/storage/logservice/logservice.go new file mode 100644 index 0000000..12c64e1 --- /dev/null +++ b/storage/logservice/logservice.go @@ -0,0 +1,199 @@ +package logservice + +import ( + "fmt" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/kuoss/lethe/config" + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/storage/fileservice" + "github.com/kuoss/lethe/storage/logservice/logmodel" + "github.com/kuoss/lethe/storage/logservice/match" +) + +type LogService struct { + fileService *fileservice.FileService +} + +func New(fileService *fileservice.FileService) *LogService { + return &LogService{fileService} +} + +func (s *LogService) Config() *config.Config { + return s.fileService.Config() +} + +func (s *LogService) SelectLog(sel *model.LogSelector) (log model.Log, warnings model.Warnings, err error) { + + // type + switch sel.Name { + case "node": + case "pod": + default: + return log, warnings, fmt.Errorf("unknown logType: %s", sel.Name) + } + + // targets + targets, err := s.getTargets(sel, &warnings) + if err != nil { + return log, warnings, fmt.Errorf("getTargets err: %w", err) + } + + // files + files := s.getFiles(sel, targets, &warnings) + + // mfs + mfs, err := match.GetMatchFuncSet(sel) + if err != nil { + return log, warnings, fmt.Errorf("getMatchFuncSet err: %w", err) + } + + // log + log = s.getLogFromFiles(sel, files, mfs, &warnings) + if err != nil { + return log, warnings, fmt.Errorf("getTargets err: %w", err) + 
} + return log, warnings, nil +} + +// targets +func (s *LogService) getTargets(sel *model.LogSelector, warnings *model.Warnings) ([]string, error) { + all, err := s.fileService.List(sel.Name) + if err != nil { + return nil, fmt.Errorf("list err: %w", err) + } + matcher, ws, err := match.GetTargetMatcher(sel) + if err != nil { + return nil, fmt.Errorf("target matcher err: %w", err) + } + *warnings = append(*warnings, ws...) + + targets := []string{} + for _, t := range all { + if matcher.Matches(filepath.Base(t)) { + targets = append(targets, t) + } + } + return targets, nil +} + +// files +func (s *LogService) getFiles(sel *model.LogSelector, targets []string, warnings *model.Warnings) (files []string) { + + for _, target := range targets { + all, err := s.fileService.List(target) + if err != nil { + *warnings = append(*warnings, fmt.Errorf("list err: %w", err)) + continue + } + for _, file := range all { + if isFileInTimeRange(file, &sel.TimeRange) { + files = append(files, file) + } + } + } + return files +} + +func isFileInTimeRange(file string, tr *model.TimeRange) bool { + name := filepath.Base(file) + fileStart, err := time.Parse(time.RFC3339, strings.Replace(name[0:13], "_", "T", 1)+":00:00Z") + if err != nil { + return false + } + fileEnd := fileStart.Add(time.Duration(3599) * time.Second) // per hour for one logs + return tr.Start.Before(fileEnd) && tr.End.After(fileStart) +} + +// log +func (s *LogService) getLogFromFiles(sel *model.LogSelector, files []string, mfs *match.MatchFuncSet, warnings *model.Warnings) model.Log { + limit := s.Config().Limit() + logLines := []model.LogLine{} + sort.Strings(files) + for _, file := range files { + s.addLogLinesFromFile(sel, &logLines, file, mfs, warnings) + if len(logLines) > limit { + break + } + } + return model.Log{ + Name: sel.Name, + Lines: logLines, + } +} + +func (s *LogService) addLogLinesFromFile(sel *model.LogSelector, logLines *[]model.LogLine, file string, mfs *match.MatchFuncSet, warnings 
*model.Warnings) { + limit := s.Config().Limit() + sc, err := s.fileService.Scanner(file) + if err != nil { + *warnings = append(*warnings, err) + return + } + for sc.Scan() { + addLogLine(sel, logLines, sc.Text(), mfs, warnings) + if len(*logLines) > limit { + return + } + } +} + +func addLogLine(sel *model.LogSelector, logLines *[]model.LogLine, line string, mfs *match.MatchFuncSet, warnings *model.Warnings) { + pos := strings.IndexRune(line, '[') + if pos < 0 { + *warnings = append(*warnings, fmt.Errorf("no time separator")) + return + } + tim := line[:pos] + parsedTime, err := time.Parse(time.RFC3339Nano, tim) + if err != nil { + *warnings = append(*warnings, fmt.Errorf("time parse err: %w", err)) + return + } + if sel.TimeRange.Start.After(parsedTime) || sel.TimeRange.End.Before(parsedTime) { + return // skip + } + rest := line[pos+1:] + + // labels]log + pos = strings.IndexRune(rest, ']') + if pos < 0 { + *warnings = append(*warnings, fmt.Errorf("no log separator")) + return + } + labels := strings.Split(rest[:pos], "|") + log := rest[pos+2:] + + // label match + if len(labels) != len(mfs.LabelMatchFuncs)+1 { + *warnings = append(*warnings, fmt.Errorf("label count mismatch")) + return + } + for i, f := range mfs.LabelMatchFuncs { + if f == nil { + continue + } + if !f(labels[i+1]) { + return + } + } + + // line match + for _, f := range mfs.LineMatchFuncs { + if !f(log) { + return + } + } + + switch sel.Name { + case "node": + *logLines = append(*logLines, logmodel.NodeLog{Time: tim, Node: labels[0], Process: labels[1], Log: log}) + return + case "pod": + *logLines = append(*logLines, logmodel.PodLog{Time: tim, Namespace: labels[0], Pod: labels[1], Container: labels[2], Log: log}) + return + } + *warnings = append(*warnings, fmt.Errorf("addLogLine: unknown log type: %s", sel.Name)) +} diff --git a/storage/logservice/logservice_test.go b/storage/logservice/logservice_test.go new file mode 100644 index 0000000..235d634 --- /dev/null +++ 
b/storage/logservice/logservice_test.go @@ -0,0 +1,11 @@ +package logservice + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + assert.NotEmpty(t, logService1) +} diff --git a/storage/logservice/match/label.go b/storage/logservice/match/label.go new file mode 100644 index 0000000..c05248e --- /dev/null +++ b/storage/logservice/match/label.go @@ -0,0 +1,66 @@ +package match + +import ( + "fmt" + "regexp" + + "github.com/kuoss/lethe/letheql/model" + "github.com/prometheus/prometheus/model/labels" +) + +func getLabelMatchFuncs(sel *model.LogSelector) ([]MatchFunc, error) { + switch sel.Name { + case "node": + return getLabelMatchFuncsDetail(sel, "process") + case "pod": + return getLabelMatchFuncsDetail(sel, "pod", "container") + } + return nil, fmt.Errorf("unknwon logType: %s", sel.Name) +} + +func getLabelMatchFuncsDetail(sel *model.LogSelector, names ...string) ([]MatchFunc, error) { + var funcs []MatchFunc + for _, name := range names { + f, err := getLabelMatchFunc(sel, name) + if err != nil { + return nil, fmt.Errorf("getLabelMatchFunc err: %w", err) + } + funcs = append(funcs, f) + } + return funcs, nil +} + +func getLabelMatchFunc(sel *model.LogSelector, name string) (MatchFunc, error) { + m := getLabelMatcher(sel, name) + if m == nil { + return nil, nil // ok (empty) + } + switch m.Type { + case labels.MatchEqual: + return func(s string) bool { return s == m.Value }, nil + case labels.MatchNotEqual: + return func(s string) bool { return s != m.Value }, nil + case labels.MatchRegexp: + re, err := regexp.Compile("^(?:" + m.Value + ")$") + if err != nil { + return nil, err + } + return func(s string) bool { return re.MatchString(s) }, nil + case labels.MatchNotRegexp: + re, err := regexp.Compile("^(?:" + m.Value + ")$") + if err != nil { + return nil, err + } + return func(s string) bool { return !re.MatchString(s) }, nil + } + return nil, fmt.Errorf("unknown match type: %s", m.Type) +} + +func 
getLabelMatcher(sel *model.LogSelector, name string) *labels.Matcher { + for _, matcher := range sel.LabelMatchers { + if matcher.Name == name { + return matcher + } + } + return nil // ok (empty) +} diff --git a/storage/logservice/match/line.go b/storage/logservice/match/line.go new file mode 100644 index 0000000..61c67ee --- /dev/null +++ b/storage/logservice/match/line.go @@ -0,0 +1,44 @@ +package match + +import ( + "fmt" + "regexp" + "strings" + + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/letheql/parser" +) + +func getLineMatchFuncs(sel *model.LogSelector) ([]MatchFunc, error) { + var funcs []MatchFunc + for _, m := range sel.LineMatchers { + f, err := getLineMatchFunc(m) + if err != nil { + return nil, fmt.Errorf("getLineMatchFunc err: %w", err) + } + funcs = append(funcs, f) + } + return funcs, nil +} + +func getLineMatchFunc(m *model.LineMatcher) (MatchFunc, error) { + switch m.Op { + case parser.PIPE_EQL: // |= + return func(s string) bool { return strings.Contains(s, m.Value) }, nil + case parser.NEQ: // != + return func(s string) bool { return !strings.Contains(s, m.Value) }, nil + case parser.PIPE_REGEX: // |~ + re, err := regexp.Compile(m.Value) + if err != nil { + return nil, err + } + return func(s string) bool { return re.MatchString(s) }, nil + case parser.NEQ_REGEX: // !~ + re, err := regexp.Compile(m.Value) + if err != nil { + return nil, err + } + return func(s string) bool { return !re.MatchString(s) }, nil + } + return nil, fmt.Errorf("unknown match op: %s", m.Op) +} diff --git a/storage/logservice/match/match.go b/storage/logservice/match/match.go new file mode 100644 index 0000000..c34c5f9 --- /dev/null +++ b/storage/logservice/match/match.go @@ -0,0 +1,28 @@ +package match + +import ( + "fmt" + + "github.com/kuoss/lethe/letheql/model" +) + +type MatchFunc func(s string) bool +type MatchFuncSet struct { + LabelMatchFuncs []MatchFunc + LineMatchFuncs []MatchFunc +} + +func GetMatchFuncSet(sel *model.LogSelector) 
(*MatchFuncSet, error) { + labelMatchFuncs, err := getLabelMatchFuncs(sel) + if err != nil { + return nil, fmt.Errorf("getLabelMatchFuncs err: %w", err) + } + lineMatchFuncs, err := getLineMatchFuncs(sel) + if err != nil { + return nil, fmt.Errorf("getLineMatchFuncs err: %w", err) + } + return &MatchFuncSet{ + LabelMatchFuncs: labelMatchFuncs, + LineMatchFuncs: lineMatchFuncs, + }, nil +} diff --git a/storage/logservice/match/target.go b/storage/logservice/match/target.go new file mode 100644 index 0000000..017afae --- /dev/null +++ b/storage/logservice/match/target.go @@ -0,0 +1,46 @@ +package match + +import ( + "fmt" + + "github.com/kuoss/lethe/letheql/model" + "github.com/prometheus/prometheus/model/labels" +) + +func GetTargetMatcher(sel *model.LogSelector) (*labels.Matcher, model.Warnings, error) { + matcher, err := getTargetLabelMatcher(sel) + if err != nil { + return nil, nil, err + } + // check matcher type + switch matcher.Type { + case labels.MatchNotEqual, labels.MatchRegexp, labels.MatchNotRegexp: + // for now, selecting multiple targets is discouraged, because cross-target logs are not sorted by time + return matcher, model.Warnings{fmt.Errorf("warnMultiTargets: use operator '=' for selecting target")}, nil + case labels.MatchEqual: + return matcher, nil, nil + } + return nil, nil, fmt.Errorf("not supported matcher type: %s", matcher.Type.String()) +} + +func getTargetLabelMatcher(sel *model.LogSelector) (*labels.Matcher, error) { + var labelName string + switch sel.Name { + case "node": + labelName = "node" + case "pod": + labelName = "namespace" + default: + return nil, fmt.Errorf("getTargetMatcher: unknown logType: %s", sel.Name) + } + var matcher *labels.Matcher + for _, m := range sel.LabelMatchers { + if m.Name == labelName { + matcher = m + } + } + if matcher == nil { + return nil, fmt.Errorf("not found label '%s' for logType '%s'", labelName, sel.Name) + } + return matcher, nil +} diff --git a/storage/querier/init_test.go 
b/storage/querier/init_test.go new file mode 100644 index 0000000..2332e9f --- /dev/null +++ b/storage/querier/init_test.go @@ -0,0 +1,12 @@ +package querier + +import ( + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/util/testutil" +) + +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + clock.SetPlaygroundMode(true) +} diff --git a/storage/querier/querier.go b/storage/querier/querier.go new file mode 100644 index 0000000..b1bbcbf --- /dev/null +++ b/storage/querier/querier.go @@ -0,0 +1,38 @@ +package querier + +import ( + "context" + + "github.com/prometheus/prometheus/model/labels" + promstorage "github.com/prometheus/prometheus/storage" +) + +// A LetheQueryable is used for testing purposes so that a Lethe Querier can be used. +type LetheQueryable struct { + LetheQuerier promstorage.Querier +} + +func (q *LetheQueryable) Querier(context.Context, int64, int64) (promstorage.Querier, error) { + return q.LetheQuerier, nil +} + +// LetheQuerier is used for test purposes to Lethe the selected series that is returned. +type LetheQuerier struct { + SelectLetheFunction func(sortSeries bool, hints *promstorage.SelectHints, matchers ...*labels.Matcher) promstorage.SeriesSet +} + +func (q *LetheQuerier) LabelValues(string, ...*labels.Matcher) ([]string, promstorage.Warnings, error) { + return nil, nil, nil +} + +func (q *LetheQuerier) LabelNames(...*labels.Matcher) ([]string, promstorage.Warnings, error) { + return nil, nil, nil +} + +func (q *LetheQuerier) Close() error { + return nil +} + +func (q *LetheQuerier) Select(sortSeries bool, hints *promstorage.SelectHints, matchers ...*labels.Matcher) promstorage.SeriesSet { + return q.SelectLetheFunction(sortSeries, hints, matchers...) 
+} diff --git a/storage/queryservice/init_test.go b/storage/queryservice/init_test.go new file mode 100644 index 0000000..e3a4da0 --- /dev/null +++ b/storage/queryservice/init_test.go @@ -0,0 +1,31 @@ +package queryservice + +import ( + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/config" + "github.com/kuoss/lethe/storage/fileservice" + "github.com/kuoss/lethe/storage/logservice" + "github.com/kuoss/lethe/util/testutil" +) + +var ( + queryService *QueryService +) + +func init() { + testutil.ChdirRoot() + testutil.ResetLogData() + clock.SetPlaygroundMode(true) + + cfg, err := config.New("test") + if err != nil { + panic(err) + } + cfg.SetLogDataPath("tmp/init") + fileService, err := fileservice.New(cfg) + if err != nil { + panic(err) + } + logService := logservice.New(fileService) + queryService = New(logService) +} diff --git a/storage/queryservice/queryservice.go b/storage/queryservice/queryservice.go new file mode 100644 index 0000000..9b4dd64 --- /dev/null +++ b/storage/queryservice/queryservice.go @@ -0,0 +1,47 @@ +package queryservice + +import ( + "context" + "reflect" + "time" + + "github.com/kuoss/common/logger" + "github.com/kuoss/lethe/clock" + "github.com/kuoss/lethe/letheql" + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/storage/logservice" + "github.com/kuoss/lethe/storage/querier" +) + +type QueryService struct { + engine *letheql.Engine + queryable *querier.LetheQueryable +} + +func New(logService *logservice.LogService) *QueryService { + return &QueryService{ + engine: letheql.NewEngine(logService), + queryable: &querier.LetheQueryable{ + LetheQuerier: &querier.LetheQuerier{}, + }, + } +} + +func (s *QueryService) Query(ctx context.Context, qs string, tr model.TimeRange) *letheql.Result { + if reflect.ValueOf(tr).IsZero() { + now := clock.Now() + tr = model.TimeRange{ + Start: now.Add(-1 * time.Minute), + End: now, + } + } + qry, err := s.engine.NewRangeQuery(ctx, s.queryable, qs, tr.Start, tr.End, 0) + if err != nil 
{ + return &letheql.Result{Err: err} + } + res := qry.Exec(ctx) + if res.Err != nil { + logger.Errorf("exec err: %s", res.Err.Error()) + } + return res +} diff --git a/storage/queryservice/queryservice_test.go b/storage/queryservice/queryservice_test.go new file mode 100644 index 0000000..204c8f1 --- /dev/null +++ b/storage/queryservice/queryservice_test.go @@ -0,0 +1,52 @@ +package queryservice + +import ( + "context" + "fmt" + "testing" + + "github.com/kuoss/lethe/letheql" + "github.com/kuoss/lethe/letheql/model" + "github.com/kuoss/lethe/storage/logservice/logmodel" + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + assert.NotEmpty(t, queryService) +} + +func TestQuery(t *testing.T) { + testCases := []struct { + qs string + tr model.TimeRange + wantError string + want *letheql.Result + }{ + { + `pod`, + model.TimeRange{}, + "getTargets err: target matcher err: not found label 'namespace' for logType 'pod'", + &letheql.Result{}, + }, + { + `pod{namespace="namespace01"}`, + model.TimeRange{}, + "", + &letheql.Result{Err: error(nil), Value: model.Log{Name: "pod", Lines: []model.LogLine{ + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "lerom ipsum"}, + logmodel.PodLog{Time: "2009-11-10T22:59:00.000000Z", Namespace: "namespace01", Pod: "nginx-deployment-75675f5897-7ci7o", Container: "nginx", Log: "hello world"}}}, Warnings: model.Warnings(nil)}, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + res := queryService.Query(context.TODO(), tc.qs, tc.tr) + if tc.wantError == "" { + assert.NoError(t, res.Err) + } else { + assert.EqualError(t, res.Err, tc.wantError) + } + res.Err = nil + assert.Equal(t, tc.want, res) + }) + } +} diff --git a/testutil/testcase.go b/testutil/testcase.go deleted file mode 100644 index 6a73fc0..0000000 --- a/testutil/testcase.go +++ /dev/null @@ -1,15 +0,0 @@ -package testutil 
- -import ( - "fmt" - "runtime" - "strings" -) - -func TC() string { - _, file, line, _ := runtime.Caller(1) - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - return fmt.Sprintf("TESTCASE:%s:%d", file, line) -} diff --git a/testutil/testcase_inner_test.go b/testutil/testcase_inner_test.go deleted file mode 100644 index a147845..0000000 --- a/testutil/testcase_inner_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package testutil - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestTC_inner(t *testing.T) { - testCases := []struct { - input string - want string - }{ - {TC(), "TESTCASE:testcase_inner_test.go:15"}, - {TC(), "TESTCASE:testcase_inner_test.go:16"}, - {TC(), "TESTCASE:testcase_inner_test.go:17"}, - } - for i, tc := range testCases { - t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - assert.Equal(t, tc.want, tc.input) - }) - } -} diff --git a/testutil/testcase_outer_test.go b/testutil/testcase_outer_test.go deleted file mode 100644 index fc0bd9e..0000000 --- a/testutil/testcase_outer_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package testutil_test - -import ( - "fmt" - "testing" - - "github.com/kuoss/lethe/testutil" - "github.com/stretchr/testify/assert" -) - -func TestTC_outer(t *testing.T) { - testCases := []struct { - input string - want string - }{ - {testutil.TC(), "TESTCASE:testcase_outer_test.go:16"}, - {testutil.TC(), "TESTCASE:testcase_outer_test.go:17"}, - {testutil.TC(), "TESTCASE:testcase_outer_test.go:18"}, - } - for i, tc := range testCases { - t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - assert.Equal(t, tc.want, tc.input) - }) - } -} diff --git a/testutil/testenv.go b/testutil/testenv.go deleted file mode 100644 index 65d1e0b..0000000 --- a/testutil/testenv.go +++ /dev/null @@ -1,131 +0,0 @@ -package testutil - -import ( - "fmt" - "log" - "os" - "path" - "path/filepath" - - "runtime" - - "github.com/kuoss/common/logger" - "github.com/kuoss/lethe/config" -) - -const ( - POD 
= "pod" - NODE = "node" - namespace01 = "namespace01" - namespace02 = "namespace02" - node01 = "node01" - node02 = "node02" -) - -func Init() { - logRoot := filepath.Join(".", "tmp", "log") - os.Setenv("TEST_MODE", "1") - changeWorkingDirectoryToProjectRoot() - - err := config.LoadConfig() - if err != nil { - logger.Fatalf("error on LoadConfig: %s", err) - } - config.Viper().Set("retention.time", "3h") - config.Viper().Set("retention.size", "10m") - config.Viper().Set("retention.sizingStrategy", "files") - config.SetLimit(1000) - config.SetLogDataPath(logRoot) - - setenvIntialDiskAvailableBytes() - fmt.Println("Test environment initialized...") -} - -func setenvIntialDiskAvailableBytes() { - if os.Getenv("TEST_INITIAL_DISK_AVAILABLE_BYTES") != "" { - return - } - logDirectory := config.GetLogDataPath() - _ = os.MkdirAll(logDirectory, 0755) - avail, err := getDiskAvailableBytes(logDirectory) - if err != nil { - log.Fatal(err) - } - os.Setenv("TEST_INITIAL_DISK_AVAILABLE_BYTES", avail) -} - -func changeWorkingDirectoryToProjectRoot() { - _, filename, _, _ := runtime.Caller(0) - dir := path.Join(path.Dir(filename), "..") - err := os.Chdir(dir) - if err != nil { - log.Fatalf("cannot change directory to [%s]", dir) - } -} - -// func ClearTestLogFiles() { -// logDirectory := config.GetLogRoot() -// fmt.Printf("clear logDirectory: %s\n", logDirectory) -// err := os.RemoveAll(logDirectory) -// if err != nil { -// log.Fatalf("cannot remove logDirectory [%s]: %s", logDirectory, err) -// } -// os.MkdirAll(logDirectory, 0755) -// } - -func SetTestLogFiles() { - // ClearTestLogFiles() - logDirectory := config.GetLogDataPath() - logger.Infof("SetTestLogFiles: logDirectory=%s", logDirectory) - err := CopyRecursively("./testutil/log", logDirectory) - if err != nil { - logger.Errorf("error on CopyRecursively: %s", err) - } -} - -func CopyRecursively(src string, dest string) error { - logger.Infof("CopyRecursively... 
src=%s, dest=%s", src, dest) - - f, err := os.Open(src) - if err != nil { - return fmt.Errorf("error on Open: %w", err) - } - file, err := f.Stat() - if err != nil { - return fmt.Errorf("error on Stat: %w", err) - } - if !file.IsDir() { - return fmt.Errorf("src[%s] is not a dir", file.Name()) - } - err = os.MkdirAll(dest, 0755) - if err != nil { - return fmt.Errorf("error on MkdirAll: %w", err) - } - files, err := os.ReadDir(src) - if err != nil { - return fmt.Errorf("error on ReadDir: %w", err) - } - for _, f := range files { - srcFile := src + "/" + f.Name() - destFile := dest + "/" + f.Name() - // dir - if f.IsDir() { - err := CopyRecursively(srcFile, destFile) - if err != nil { - logger.Errorf("error on CopyRecursively: %s", err) - } - continue - } - // file - content, err := os.ReadFile(srcFile) - if err != nil { - logger.Errorf("error on ReadFile: %s", err) - continue - } - err = os.WriteFile(destFile, content, 0755) - if err != nil { - logger.Errorf("error on WriteFile: %s", err) - } - } - return nil -} diff --git a/testutil/testenv_disk_windows.go b/testutil/testenv_disk_windows.go deleted file mode 100644 index 4188ef4..0000000 --- a/testutil/testenv_disk_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build windows -// +build windows - -package testutil - -import ( - "fmt" - // "golang.org/x/sys/windows" -) - -func getDiskAvailableBytes(path string) (string, error) { - fmt.Println("Currently getDiskAvailableBytes is not supported on Windows.") - return "99999999", nil - // var free, total, available uint64 - // pathPtr, err := windows.UTF16PtrFromString(path) - // if err != nil { - // return "", fmt.Errorf("cannot get utf16ptr from string [%s]: %s", path, err) - // } - // err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &available) - // if err != nil { - // return "", fmt.Errorf("cannot get disk free space for [%s]: %s", path, err) - // } - // return fmt.Sprintf("%d", available), nil -} diff --git a/testutil/testenv_test.go 
b/testutil/testenv_test.go deleted file mode 100644 index c40c484..0000000 --- a/testutil/testenv_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package testutil - -import ( - "os" - "testing" - - "github.com/kuoss/lethe/config" - "github.com/stretchr/testify/assert" -) - -func init() { - Init() -} - -func Test_Init(t *testing.T) { - testMode := os.Getenv("TEST_MODE") - if testMode != "1" { - t.Fatalf("TEST_MODE=[%s], not 1", testMode) - } -} - -func Test_SetTestLogFiles(t *testing.T) { - SetTestLogFiles() - - logDirectory := config.GetLogDataPath() - assert.DirExists(t, logDirectory) -} diff --git a/util/byte.go b/util/byte.go deleted file mode 100644 index ccd3587..0000000 --- a/util/byte.go +++ /dev/null @@ -1,23 +0,0 @@ -package util - -import ( - "fmt" - "strconv" -) - -func StringToBytes(str string) (int, error) { - unit := str[len(str)-1:] - num, err := strconv.Atoi(str[:len(str)-1]) - if err != nil { - return 0, err - } - switch unit { - case "k": - return num * 1024, nil - case "m": - return num * 1024 * 1024, nil - case "g": - return num * 1024 * 1024 * 1024, nil - } - return 0, fmt.Errorf("cannot accept unit '%s' in '%s''. 
allowed units: [k, m, g]", unit, str) -} diff --git a/util/byte_test.go b/util/byte_test.go deleted file mode 100644 index bf8a533..0000000 --- a/util/byte_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package util - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_StringToBytes(t *testing.T) { - var out int - out, _ = StringToBytes("100k") - assert.Equal(t, 102400, out) - out, _ = StringToBytes("100m") - assert.Equal(t, 104857600, out) - out, _ = StringToBytes("100g") - assert.Equal(t, 107374182400, out) -} diff --git a/util/string.go b/util/string.go index b668810..45a93ad 100644 --- a/util/string.go +++ b/util/string.go @@ -1,6 +1,7 @@ package util import ( + "fmt" "strconv" "strings" ) @@ -27,3 +28,20 @@ func CountNewlines(s string) string { } return strconv.Itoa(n) } + +func StringToBytes(str string) (int, error) { + unit := str[len(str)-1:] + num, err := strconv.Atoi(str[:len(str)-1]) + if err != nil { + return 0, err + } + switch unit { + case "k": + return num * 1024, nil + case "m": + return num * 1024 * 1024, nil + case "g": + return num * 1024 * 1024 * 1024, nil + } + return 0, fmt.Errorf("cannot accept unit '%s' in '%s'. 
allowed units: [k, m, g]", unit, str) +} diff --git a/util/string_test.go b/util/string_test.go index a82141c..f2a3675 100644 --- a/util/string_test.go +++ b/util/string_test.go @@ -1,26 +1,49 @@ package util import ( + "fmt" "testing" "github.com/stretchr/testify/assert" ) -func Test_SubstrAfter(t *testing.T) { - assert.Equal(t, "orld", SubstrAfter("hello world", "w")) - assert.Equal(t, "world", SubstrAfter("hello world", " ")) - assert.Equal(t, "lo world", SubstrAfter("hello world", "l")) +func TestSubstrAfter(t *testing.T) { + testCases := []struct { + haystack string + needle string + want string + }{ + {"hello world", "w", "orld"}, + {"hello world", " ", "world"}, + {"hello world", "l", "lo world"}, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + got := SubstrAfter(tc.haystack, tc.needle) + assert.Equal(t, tc.want, got) + }) + } } -func Test_SubstrBefore(t *testing.T) { +func TestSubstrBefore(t *testing.T) { assert.Equal(t, "hello ", SubstrBefore("hello world", "w")) assert.Equal(t, "hello", SubstrBefore("hello world", " ")) assert.Equal(t, "he", SubstrBefore("hello world", "l")) } -func Test_CountNewlines(t *testing.T) { +func TestCountNewlines(t *testing.T) { assert.Equal(t, "1", CountNewlines("hello world")) assert.Equal(t, "2", CountNewlines("hello\nworld")) assert.Equal(t, "2", CountNewlines("hello\nworld\n")) } + +func TestStringToBytes(t *testing.T) { + var out int + out, _ = StringToBytes("100k") + assert.Equal(t, 102400, out) + out, _ = StringToBytes("100m") + assert.Equal(t, 104857600, out) + out, _ = StringToBytes("100g") + assert.Equal(t, 107374182400, out) +} diff --git a/testutil/log/node/node01/2009-11-10_21.log b/util/testutil/log/node/node01/2009-11-10_21.log similarity index 100% rename from testutil/log/node/node01/2009-11-10_21.log rename to util/testutil/log/node/node01/2009-11-10_21.log diff --git a/testutil/log/node/node01/2009-11-10_22.log b/util/testutil/log/node/node01/2009-11-10_22.log 
similarity index 100% rename from testutil/log/node/node01/2009-11-10_22.log rename to util/testutil/log/node/node01/2009-11-10_22.log diff --git a/testutil/log/node/node02/2009-11-01_00.log b/util/testutil/log/node/node02/2009-11-01_00.log similarity index 100% rename from testutil/log/node/node02/2009-11-01_00.log rename to util/testutil/log/node/node02/2009-11-01_00.log diff --git a/testutil/log/node/node02/2009-11-10_21.log b/util/testutil/log/node/node02/2009-11-10_21.log similarity index 100% rename from testutil/log/node/node02/2009-11-10_21.log rename to util/testutil/log/node/node02/2009-11-10_21.log diff --git a/testutil/log/pod/namespace01/2000-01-01_00.log b/util/testutil/log/pod/namespace01/2000-01-01_00.log similarity index 100% rename from testutil/log/pod/namespace01/2000-01-01_00.log rename to util/testutil/log/pod/namespace01/2000-01-01_00.log diff --git a/testutil/log/pod/namespace01/2009-11-10_21.log b/util/testutil/log/pod/namespace01/2009-11-10_21.log similarity index 100% rename from testutil/log/pod/namespace01/2009-11-10_21.log rename to util/testutil/log/pod/namespace01/2009-11-10_21.log diff --git a/testutil/log/pod/namespace01/2009-11-10_22.log b/util/testutil/log/pod/namespace01/2009-11-10_22.log similarity index 100% rename from testutil/log/pod/namespace01/2009-11-10_22.log rename to util/testutil/log/pod/namespace01/2009-11-10_22.log diff --git a/testutil/log/pod/namespace01/2029-11-10_23.log b/util/testutil/log/pod/namespace01/2029-11-10_23.log similarity index 100% rename from testutil/log/pod/namespace01/2029-11-10_23.log rename to util/testutil/log/pod/namespace01/2029-11-10_23.log diff --git a/testutil/log/pod/namespace02/0000-00-00_00.log b/util/testutil/log/pod/namespace02/0000-00-00_00.log similarity index 100% rename from testutil/log/pod/namespace02/0000-00-00_00.log rename to util/testutil/log/pod/namespace02/0000-00-00_00.log diff --git a/testutil/log/pod/namespace02/2009-11-10_22.log 
b/util/testutil/log/pod/namespace02/2009-11-10_22.log similarity index 100% rename from testutil/log/pod/namespace02/2009-11-10_22.log rename to util/testutil/log/pod/namespace02/2009-11-10_22.log diff --git a/util/testutil/testutil.go b/util/testutil/testutil.go new file mode 100644 index 0000000..d49575a --- /dev/null +++ b/util/testutil/testutil.go @@ -0,0 +1,102 @@ +package testutil + +import ( + "fmt" + "os" + "path" + "path/filepath" + "runtime" + "strings" +) + +var root string + +func ChdirRoot() { + if root != "" { + fmt.Printf("ChdirRoot: %s (skipped)\n", root) + return + } + + _, filename, _, _ := runtime.Caller(0) + root = path.Join(path.Dir(filename), "../..") + fmt.Printf("ChdirRoot: %s\n", root) + err := os.Chdir(root) + if err != nil { + panic(err) + } +} + +func getTestID() string { + _, filename, _, _ := runtime.Caller(2) + if strings.HasSuffix(filename, "/init_test.go") { + return "init" + } + rel, err := filepath.Rel(root, filename) + if err != nil { + panic(err) + } + rel = strings.ReplaceAll(rel, ".go", "") + rel = strings.ReplaceAll(rel, string(os.PathSeparator), "_") + return rel +} + +func ResetLogData() { + testID := getTestID() + logDataPath := "tmp/" + testID + if testID != "init" { + fmt.Printf("remove logDataPath: %s\n", logDataPath) + err := os.RemoveAll(logDataPath) + if err != nil { + panic(err) + } + } + fmt.Printf("fill logDataPath: %s\n", logDataPath) + err := copyRecursively("./util/testutil/log", logDataPath) + if err != nil { + panic(err) + } +} + +func copyRecursively(src string, dest string) error { + f, err := os.Open(src) + if err != nil { + return fmt.Errorf("open err: %w", err) + } + file, err := f.Stat() + if err != nil { + return fmt.Errorf("stat err: %w", err) + } + if !file.IsDir() { + return fmt.Errorf("not dir: %s", file.Name()) + } + err = os.MkdirAll(dest, 0755) + if err != nil { + return fmt.Errorf("mkdirAll err: %w", err) + } + files, err := os.ReadDir(src) + if err != nil { + return fmt.Errorf("readDir err: 
%w", err) + } + for _, f := range files { + srcFile := src + "/" + f.Name() + destFile := dest + "/" + f.Name() + // dir + if f.IsDir() { + err := copyRecursively(srcFile, destFile) + if err != nil { + return fmt.Errorf("copyRecursively err: %w", err) + } + continue + } + // file + content, err := os.ReadFile(srcFile) + if err != nil { + return fmt.Errorf("readFile err: %w", err) + } + err = os.WriteFile(destFile, content, 0755) + if err != nil { + return fmt.Errorf("writeFile err: %w", err) + } + } + return nil +}