From c5e50bf2b96f9981b06d426ea713300c326b79fa Mon Sep 17 00:00:00 2001 From: krdhruva <51477511+krdhruva@users.noreply.github.com> Date: Wed, 20 May 2020 02:14:27 +0530 Subject: [PATCH] Adding Authz support for Azure Signed-off-by: Krupesh Dhruva --- .github/workflows/ci.yml | 26 +- .github/workflows/release.yml | 20 +- Dockerfile.dbg | 2 +- Dockerfile.in | 2 +- Makefile | 23 +- auth/providers/azure/azure.go | 15 +- authz/providers/azure/azure.go | 125 +++++++ authz/providers/azure/azure_test.go | 122 +++++++ authz/providers/azure/data/datastore.go | 139 +++++++ authz/providers/azure/options.go | 124 +++++++ .../azure/rbac/checkaccessreqhelper.go | 338 ++++++++++++++++++ .../azure/rbac/checkaccessreqhelper_test.go | 258 +++++++++++++ authz/providers/azure/rbac/rbac.go | 258 +++++++++++++ authz/providers/azure/rbac/rbac_test.go | 206 +++++++++++ authz/providers/providers.go | 66 ++++ authz/types.go | 57 +++ commands/installer.go | 16 +- commands/run.go | 8 +- commands/webhok_config.go | 81 +++-- docs/CHANGELOG.md | 8 + go.mod | 24 +- go.sum | 24 +- installer/deployment.go | 57 +-- installer/installer.go | 16 +- installer/options.go | 38 +- server/{recommended.go => authrecommended.go} | 10 +- server/authzhandler.go | 80 +++++ server/authzrecommended.go | 52 +++ server/handler.go | 19 +- server/handler_test.go | 6 +- server/prometheus.go | 15 +- server/server.go | 67 +++- server/utils.go | 43 +++ test/e2e/installer_test.go | 62 ++-- vendor/github.com/allegro/bigcache/.gitignore | 10 + .../github.com/allegro/bigcache/.travis.yml | 31 ++ vendor/github.com/allegro/bigcache/LICENSE | 201 +++++++++++ vendor/github.com/allegro/bigcache/README.md | 150 ++++++++ .../github.com/allegro/bigcache/bigcache.go | 202 +++++++++++ vendor/github.com/allegro/bigcache/bytes.go | 14 + .../allegro/bigcache/bytes_appengine.go | 7 + vendor/github.com/allegro/bigcache/clock.go | 14 + vendor/github.com/allegro/bigcache/config.go | 86 +++++ .../github.com/allegro/bigcache/encoding.go | 62 
++++ .../allegro/bigcache/entry_not_found_error.go | 6 + vendor/github.com/allegro/bigcache/fnv.go | 28 ++ vendor/github.com/allegro/bigcache/hash.go | 8 + .../github.com/allegro/bigcache/iterator.go | 122 +++++++ vendor/github.com/allegro/bigcache/logger.go | 30 ++ .../allegro/bigcache/queue/bytes_queue.go | 238 ++++++++++++ vendor/github.com/allegro/bigcache/shard.go | 259 ++++++++++++++ vendor/github.com/allegro/bigcache/stats.go | 15 + vendor/github.com/allegro/bigcache/utils.go | 16 + vendor/github.com/ghodss/yaml/yaml_go110.go | 14 + vendor/github.com/pkg/errors/Makefile | 44 +++ vendor/github.com/pkg/errors/go113.go | 38 ++ .../github.com/spf13/pflag/float32_slice.go | 174 +++++++++ .../github.com/spf13/pflag/float64_slice.go | 166 +++++++++ vendor/github.com/spf13/pflag/go.mod | 3 + vendor/github.com/spf13/pflag/go.sum | 0 vendor/github.com/spf13/pflag/int32_slice.go | 174 +++++++++ vendor/github.com/spf13/pflag/int64_slice.go | 166 +++++++++ .../github.com/spf13/pflag/string_to_int64.go | 149 ++++++++ vendor/modules.txt | 10 +- 64 files changed, 4655 insertions(+), 189 deletions(-) create mode 100644 authz/providers/azure/azure.go create mode 100644 authz/providers/azure/azure_test.go create mode 100644 authz/providers/azure/data/datastore.go create mode 100644 authz/providers/azure/options.go create mode 100644 authz/providers/azure/rbac/checkaccessreqhelper.go create mode 100644 authz/providers/azure/rbac/checkaccessreqhelper_test.go create mode 100644 authz/providers/azure/rbac/rbac.go create mode 100644 authz/providers/azure/rbac/rbac_test.go create mode 100644 authz/providers/providers.go create mode 100644 authz/types.go rename server/{recommended.go => authrecommended.go} (90%) create mode 100644 server/authzhandler.go create mode 100644 server/authzrecommended.go create mode 100644 vendor/github.com/allegro/bigcache/.gitignore create mode 100644 vendor/github.com/allegro/bigcache/.travis.yml create mode 100644 
vendor/github.com/allegro/bigcache/LICENSE create mode 100644 vendor/github.com/allegro/bigcache/README.md create mode 100644 vendor/github.com/allegro/bigcache/bigcache.go create mode 100644 vendor/github.com/allegro/bigcache/bytes.go create mode 100644 vendor/github.com/allegro/bigcache/bytes_appengine.go create mode 100644 vendor/github.com/allegro/bigcache/clock.go create mode 100644 vendor/github.com/allegro/bigcache/config.go create mode 100644 vendor/github.com/allegro/bigcache/encoding.go create mode 100644 vendor/github.com/allegro/bigcache/entry_not_found_error.go create mode 100644 vendor/github.com/allegro/bigcache/fnv.go create mode 100644 vendor/github.com/allegro/bigcache/hash.go create mode 100644 vendor/github.com/allegro/bigcache/iterator.go create mode 100644 vendor/github.com/allegro/bigcache/logger.go create mode 100644 vendor/github.com/allegro/bigcache/queue/bytes_queue.go create mode 100644 vendor/github.com/allegro/bigcache/shard.go create mode 100644 vendor/github.com/allegro/bigcache/stats.go create mode 100644 vendor/github.com/allegro/bigcache/utils.go create mode 100644 vendor/github.com/ghodss/yaml/yaml_go110.go create mode 100644 vendor/github.com/pkg/errors/Makefile create mode 100644 vendor/github.com/pkg/errors/go113.go create mode 100644 vendor/github.com/spf13/pflag/float32_slice.go create mode 100644 vendor/github.com/spf13/pflag/float64_slice.go create mode 100644 vendor/github.com/spf13/pflag/go.mod create mode 100644 vendor/github.com/spf13/pflag/go.sum create mode 100644 vendor/github.com/spf13/pflag/int32_slice.go create mode 100644 vendor/github.com/spf13/pflag/int64_slice.go create mode 100644 vendor/github.com/spf13/pflag/string_to_int64.go diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 900572145..91311cb7d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,11 +1,12 @@ name: CI on: + pull_request: + branches: + - '*' push: - branches-ignore: - - 'release-*' - tags-ignore: 
- - '*.*' + branches: + - master jobs: @@ -14,28 +15,31 @@ jobs: runs-on: ubuntu-latest steps: - - name: Set up Go 1.13 - uses: actions/setup-go@v1 + - name: Set up Go 1.14 + uses: actions/setup-go@v2 with: - go-version: 1.13 + go-version: 1.14 id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v1 + - uses: actions/checkout@v2 - name: Set up Docker Buildx id: buildx uses: crazy-max/ghaction-docker-buildx@v1 with: - version: latest + buildx-version: latest + qemu-version: latest - name: Available platforms run: echo ${{ steps.buildx.outputs.platforms }} - - name: Run checks + - name: Prepare Host run: | sudo apt-get -qq update || true sudo apt-get install -y bzr + + - name: Run checks + run: | make ci - name: Build diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3cfa71e69..fef7a99d6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,8 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v1 + - uses: actions/checkout@v1 - name: Print version info id: semver @@ -24,7 +23,8 @@ jobs: id: buildx uses: crazy-max/ghaction-docker-buildx@v1 with: - version: latest + buildx-version: latest + qemu-version: latest - name: Available platforms run: echo ${{ steps.buildx.outputs.platforms }} @@ -38,6 +38,20 @@ jobs: docker login --username ${USERNAME} --password ${DOCKER_TOKEN} make release + - name: Release + uses: softprops/action-gh-release@v1 + if: startsWith(github.ref, 'refs/tags/') + with: + draft: true + files: | + bin/guard-linux-amd64 + bin/guard-linux-arm + bin/guard-linux-arm64 + bin/guard-windows-amd64.exe + bin/guard-darwin-amd64 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - uses: actions/upload-artifact@master with: name: guard-binaries diff --git a/Dockerfile.dbg b/Dockerfile.dbg index d17837fbc..3d4374484 100644 --- a/Dockerfile.dbg +++ b/Dockerfile.dbg @@ -14,6 +14,6 @@ FROM 
{ARG_FROM} -ADD bin/{ARG_OS}_{ARG_ARCH}/{ARG_BIN} /{ARG_BIN} +ADD bin/{ARG_BIN}-{ARG_OS}-{ARG_ARCH} /{ARG_BIN} ENTRYPOINT ["/{ARG_BIN}"] diff --git a/Dockerfile.in b/Dockerfile.in index 239a82234..cef1d353b 100644 --- a/Dockerfile.in +++ b/Dockerfile.in @@ -14,7 +14,7 @@ FROM {ARG_FROM} -ADD bin/{ARG_OS}_{ARG_ARCH}/{ARG_BIN} /{ARG_BIN} +ADD bin/{ARG_BIN}-{ARG_OS}-{ARG_ARCH} /{ARG_BIN} # This would be nicer as `nobody:nobody` but distroless has no such entries. USER 65535:65535 diff --git a/Makefile b/Makefile index e12f6fd19..7fb88d24e 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ endif ### These variables should not need tweaking. ### -SRC_PKGS := *.go auth commands docs installer server util +SRC_PKGS := *.go auth authz commands docs installer server util SRC_DIRS := $(SRC_PKGS) test hack/gendocs # directories which hold app source (not vendored) DOCKER_PLATFORMS := linux/amd64 linux/arm linux/arm64 @@ -67,12 +67,13 @@ TAG := $(VERSION)_$(OS)_$(ARCH) TAG_PROD := $(TAG) TAG_DBG := $(VERSION)-dbg_$(OS)_$(ARCH) -GO_VERSION ?= 1.13.8 +GO_VERSION ?= 1.14.2 BUILD_IMAGE ?= appscode/golang-dev:$(GO_VERSION) -OUTBIN = bin/$(OS)_$(ARCH)/$(BIN) +OUTBIN = bin/$(BIN)-$(OS)-$(ARCH) ifeq ($(OS),windows) - OUTBIN = bin/$(OS)_$(ARCH)/$(BIN).exe + OUTBIN := bin/$(BIN)-$(OS)-$(ARCH).exe + BIN := $(BIN).exe endif # Directories that we need created to build/test. @@ -189,7 +190,7 @@ $(OUTBIN): .go/$(OUTBIN).stamp " @if [ $(COMPRESS) = yes ] && [ $(OS) != darwin ]; then \ echo "compressing $(OUTBIN)"; \ - @docker run \ + docker run \ -i \ --rm \ -u $$(id -u):$$(id -g) \ @@ -201,11 +202,11 @@ $(OUTBIN): .go/$(OUTBIN).stamp --env HTTP_PROXY=$(HTTP_PROXY) \ --env HTTPS_PROXY=$(HTTPS_PROXY) \ $(BUILD_IMAGE) \ - upx --brute /go/$(OUTBIN); \ + upx --brute /go/bin/$(BIN); \ fi - @if ! cmp -s .go/$(OUTBIN) $(OUTBIN); then \ - mv .go/$(OUTBIN) $(OUTBIN); \ - date >$@; \ + @if ! 
cmp -s .go/bin/$(OS)_$(ARCH)/$(BIN) $(OUTBIN); then \ + mv .go/bin/$(OS)_$(ARCH)/$(BIN) $(OUTBIN); \ + date >$@; \ fi @echo @@ -213,7 +214,7 @@ $(OUTBIN): .go/$(OUTBIN).stamp DOTFILE_IMAGE = $(subst /,_,$(IMAGE))-$(TAG) container: bin/.container-$(DOTFILE_IMAGE)-PROD bin/.container-$(DOTFILE_IMAGE)-DBG -bin/.container-$(DOTFILE_IMAGE)-%: bin/$(OS)_$(ARCH)/$(BIN) $(DOCKERFILE_%) +bin/.container-$(DOTFILE_IMAGE)-%: $(OUTBIN) $(DOCKERFILE_%) @echo "container: $(IMAGE):$(TAG_$*)" @sed \ -e 's|{ARG_BIN}|$(BIN)|g' \ @@ -324,7 +325,7 @@ lint: $(BUILD_DIRS) --env HTTP_PROXY=$(HTTP_PROXY) \ --env HTTPS_PROXY=$(HTTPS_PROXY) \ --env GO111MODULE=on \ - --env GOFLAGS="-mod=vendor" \ + --env GOFLAGS="-mod=vendor" \ $(BUILD_IMAGE) \ golangci-lint run --enable $(ADDTL_LINTERS) --deadline=10m --skip-files="generated.*\.go$\" --skip-dirs-use-default --skip-dirs=client,vendor diff --git a/auth/providers/azure/azure.go b/auth/providers/azure/azure.go index 1b176471e..1cff909b3 100644 --- a/auth/providers/azure/azure.go +++ b/auth/providers/azure/azure.go @@ -22,6 +22,7 @@ import ( "fmt" "io/ioutil" "net/http" + "strings" "github.com/appscode/guard/auth" "github.com/appscode/guard/auth/providers/azure/graph" @@ -161,6 +162,7 @@ func (s Authenticator) Check(token string) (*authv1.UserInfo, error) { if err != nil { return nil, err } + if s.Options.ResolveGroupMembershipOnlyOnOverageClaim { groups, skipGraphAPI, err := getGroupsAndCheckOverage(claims) if err != nil { @@ -284,7 +286,11 @@ func (c claims) getUserInfo(usernameClaim, userObjectIDClaim string) (*authv1.Us return nil, errors.Wrap(err, "unable to get username claim") } - return &authv1.UserInfo{Username: username}, nil + useroid, _ := c.string(userObjectIDClaim) + + return &authv1.UserInfo{ + Username: username, + Extra: map[string]authv1.ExtraValue{"oid": {useroid}}}, nil } // String gets a string value from claims given a key. 
Returns error if @@ -316,9 +322,14 @@ func getAuthInfo(environment, tenantID string, getMetadata func(string, string) return nil, errors.Wrap(err, "failed to get metadata for azure") } + msgraphHost := metadata.MsgraphHost + if strings.EqualFold(azure.USGovernmentCloud.Name, environment) { + msgraphHost = "graph.microsoft.us" + } + return &authInfo{ AADEndpoint: env.ActiveDirectoryEndpoint, - MSGraphHost: metadata.MsgraphHost, + MSGraphHost: msgraphHost, Issuer: metadata.Issuer, }, nil } diff --git a/authz/providers/azure/azure.go b/authz/providers/azure/azure.go new file mode 100644 index 000000000..7936acf20 --- /dev/null +++ b/authz/providers/azure/azure.go @@ -0,0 +1,125 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package azure + +import ( + "strings" + + "github.com/Azure/go-autorest/autorest/azure" + auth "github.com/appscode/guard/auth/providers/azure" + "github.com/appscode/guard/authz" + "github.com/appscode/guard/authz/providers/azure/rbac" + "github.com/golang/glog" + "github.com/pkg/errors" + authzv1 "k8s.io/api/authorization/v1" +) + +const ( + OrgType = "azure" +) + +func init() { + authz.SupportedOrgs = append(authz.SupportedOrgs, OrgType) +} + +type Authorizer struct { + rbacClient *rbac.AccessInfo +} + +type authzInfo struct { + AADEndpoint string + ARMEndPoint string +} + +func New(opts Options, authopts auth.Options, dataStore authz.Store) (authz.Interface, error) { + c := &Authorizer{} + + authzInfoVal, err := getAuthzInfo(authopts.Environment) + if err != nil { + return nil, errors.Wrap(err, "Error in getAuthzInfo %s") + } + + switch opts.AuthzMode { + case ARCAuthzMode: + c.rbacClient, err = rbac.New(authopts.ClientID, authopts.ClientSecret, authopts.TenantID, authzInfoVal.AADEndpoint, authzInfoVal.ARMEndPoint, opts.AuthzMode, opts.ResourceId, opts.ARMCallLimit, dataStore, opts.SkipAuthzCheck, opts.AuthzResolveGroupMemberships, opts.SkipAuthzForNonAADUsers) + case AKSAuthzMode: + c.rbacClient, err = rbac.NewWithAKS(opts.AKSAuthzURL, authopts.TenantID, authzInfoVal.ARMEndPoint, opts.AuthzMode, opts.ResourceId, opts.ARMCallLimit, dataStore, opts.SkipAuthzCheck, opts.AuthzResolveGroupMemberships, opts.SkipAuthzForNonAADUsers) + } + + if err != nil { + return nil, errors.Wrap(err, "failed to create ms rbac client") + } + return c, nil +} + +func (s Authorizer) Check(request *authzv1.SubjectAccessReviewSpec) (*authzv1.SubjectAccessReviewStatus, error) { + if request == nil { + return nil, errors.New("subject access review is nil") + } + + // check if user is service account + if strings.HasPrefix(strings.ToLower(request.User), "system") { + glog.V(3).Infof("returning no op to service accounts") + return &authzv1.SubjectAccessReviewStatus{Allowed: false, 
Reason: rbac.NoOpinionVerdict}, nil + } + + if _, ok := request.Extra["oid"]; !ok { + if s.rbacClient.ShouldSkipAuthzCheckForNonAADUsers() { + glog.V(3).Infof("Skip RBAC is set for non AAD users. Returning no opinion for user %s. You may observe this for AAD users for 'can-i' requests.", request.User) + return &authzv1.SubjectAccessReviewStatus{Allowed: false, Reason: rbac.NoOpinionVerdict}, nil + } else { + glog.V(3).Infof("Skip RBAC for non AAD user is not set. Returning deny access for non AAD user %s. You may observe this for AAD users for 'can-i' requests.", request.User) + return &authzv1.SubjectAccessReviewStatus{Allowed: false, Denied: true, Reason: rbac.NotAllowedForNonAADUsers}, nil + } + } + + if s.rbacClient.SkipAuthzCheck(request) { + glog.V(3).Infof("user %s is part of skip authz list. returning no op.", request.User) + return &authzv1.SubjectAccessReviewStatus{Allowed: false, Reason: rbac.NoOpinionVerdict}, nil + } + + exist, result := s.rbacClient.GetResultFromCache(request) + if exist { + if result { + glog.V(3).Infof("cache hit: returning allowed to user") + return &authzv1.SubjectAccessReviewStatus{Allowed: result, Reason: rbac.AccessAllowedVerdict}, nil + } else { + glog.V(3).Infof("cache hit: returning denied to user") + return &authzv1.SubjectAccessReviewStatus{Allowed: result, Denied: true, Reason: rbac.AccessNotAllowedVerdict}, nil + } + } + + if s.rbacClient.IsTokenExpired() { + s.rbacClient.RefreshToken() + } + return s.rbacClient.CheckAccess(request) +} + +func getAuthzInfo(environment string) (*authzInfo, error) { + var err error + env := azure.PublicCloud + if environment != "" { + env, err = azure.EnvironmentFromName(environment) + if err != nil { + return nil, errors.Wrap(err, "failed to parse environment for azure") + } + } + + return &authzInfo{ + AADEndpoint: env.ActiveDirectoryEndpoint, + ARMEndPoint: env.ResourceManagerEndpoint, + }, nil +} diff --git a/authz/providers/azure/azure_test.go b/authz/providers/azure/azure_test.go new 
file mode 100644 index 000000000..de3811e24 --- /dev/null +++ b/authz/providers/azure/azure_test.go @@ -0,0 +1,122 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "net" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/appscode/guard/authz/providers/azure/data" + "github.com/appscode/guard/authz/providers/azure/rbac" + "github.com/appscode/pat" + "github.com/stretchr/testify/assert" + authzv1 "k8s.io/api/authorization/v1" +) + +const ( + loginResp = `{ "token_type": "Bearer", "expires_in": 8459, "access_token": "%v"}` +) + +func clientSetup(serverUrl, mode string) (*Authorizer, error) { + c := &Authorizer{} + + var testOptions = data.Options{ + HardMaxCacheSize: 1, + Shards: 1, + LifeWindow: 1 * time.Minute, + CleanWindow: 1 * time.Minute, + MaxEntriesInWindow: 10, + MaxEntrySize: 5, + Verbose: false, + } + dataStore, err := data.NewDataStore(testOptions) + if err != nil { + return nil, err + } + + c.rbacClient, err = rbac.New("client_id", "client_secret", "tenant_id", serverUrl+"/login/", serverUrl+"/arm/", mode, "resourceId", 2000, dataStore, []string{"alpha, tango, charlie"}, true, true) + if err != nil { + return nil, err + } + + return c, nil +} + +func serverSetup(loginResp, checkaccessResp string, loginStatus, checkaccessStatus int) (*httptest.Server, error) { + listener, err := net.Listen("tcp", "127.0.0.1:") + if err != nil { + return nil, err + } + + m := pat.New() + + m.Post("/login/", 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(loginStatus) + _, _ = w.Write([]byte(loginResp)) + })) + + m.Post("/arm/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(checkaccessStatus) + _, _ = w.Write([]byte(checkaccessResp)) + })) + + srv := &httptest.Server{ + Listener: listener, + Config: &http.Server{Handler: m}, + } + srv.Start() + + return srv, nil +} + +func getServerAndClient(t *testing.T, loginResp, checkaccessResp string) (*httptest.Server, *Authorizer) { + srv, err := serverSetup(loginResp, checkaccessResp, http.StatusOK, http.StatusOK) + if err != nil { + t.Fatalf("Error when creating server, reason: %v", err) + } + + client, err := clientSetup(srv.URL, "arc") + if err != nil { + t.Fatalf("Error when creatidng azure client. reason : %v", err) + } + return srv, client +} + +func TestCheck(t *testing.T) { + t.Run("successful request", func(t *testing.T) { + var validBody = `[{"accessDecision":"Allowed", + "actionId":"Microsoft.Kubernetes/connectedClusters/pods/delete", + "isDataAction":true,"roleAssignment":null,"denyAssignment":null,"timeToLiveInMs":300000}]` + + srv, client := getServerAndClient(t, loginResp, validBody) + defer srv.Close() + + request := &authzv1.SubjectAccessReviewSpec{ + User: "beta@bing.com", + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: "dev", Group: "", Resource: "pods", + Subresource: "status", Version: "v1", Name: "test", Verb: "delete"}, Extra: map[string]authzv1.ExtraValue{"oid": {"00000000-0000-0000-0000-000000000000"}}} + + resp, err := client.Check(request) + assert.Nilf(t, err, "Should not have got error") + assert.NotNil(t, resp) + assert.Equal(t, resp.Allowed, true) + assert.Equal(t, resp.Denied, false) + }) +} diff --git a/authz/providers/azure/data/datastore.go b/authz/providers/azure/data/datastore.go new file mode 100644 index 000000000..c0dfd1342 --- /dev/null +++ b/authz/providers/azure/data/datastore.go @@ -0,0 +1,139 @@ +package 
data + +import ( + "encoding/json" + "errors" + "time" + + "github.com/allegro/bigcache" + "github.com/appscode/guard/authz" +) + +const ( + maxCacheSizeInMB = 5 + totalShards = 128 + ttlInMins = 5 + cleanupInMins = 2 + maxEntrySize = 100 + maxEntriesInWin = 10 * 10 * 60 +) + +type DataStore struct { + cache *bigcache.BigCache +} + +// Set stores the given value for the given key. +// The key must not be "" and the value must not be nil. +func (s *DataStore) Set(key string, value interface{}) error { + if key == "" || value == nil { + return errors.New("invalid key value pair") + } + + data, err := json.Marshal(value) + if err != nil { + return err + } + return s.cache.Set(key, data) +} + +// Get retrieves the Stored value for the given key. +// If no value is found it returns (false, nil). +// The key must not be "" and the pointer must not be nil. +func (s *DataStore) Get(key string, value interface{}) (found bool, err error) { + if key == "" || value == nil { + return false, errors.New("invalid key value pair") + } + + data, err := s.cache.Get(key) + if err != nil { + return false, err + } + + return true, json.Unmarshal(data, value) +} + +// Delete deletes the stored value for the given key. +// The key must not be "". +func (s *DataStore) Delete(key string) error { + if key == "" { + return errors.New("invalid key") + } + + err := s.cache.Delete(key) + if err != nil { + return err + } + + return nil +} + +// Close closes the DataStore. +// When called, the cache is left for removal by the garbage collector. +func (s *DataStore) Close() error { + return s.cache.Close() +} + +// Options are the options for the BigCache store. +type Options struct { + // Number of cache shards, value must be a power of two + Shards int + // Time after which entry can be evicted + LifeWindow time.Duration + // Interval between removing expired entries (clean up). + // If set to <= 0 then no action is performed. 
Setting to < 1 second is counterproductive — bigcache has a one second resolution. + CleanWindow time.Duration + // Max number of entries in life window. Used only to calculate initial size for cache shards. + // When proper value is set then additional memory allocation does not occur. + MaxEntriesInWindow int + // Max size of entry in bytes. Used only to calculate initial size for cache shards. + MaxEntrySize int + // StatsEnabled if true calculate the number of times a cached resource was requested. + StatsEnabled bool + // Verbose mode prints information about new memory allocation + Verbose bool + // HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit. + // It can protect application from consuming all available memory on machine, therefore from running OOM Killer. + // Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then + // the oldest entries are overridden for the new ones. + HardMaxCacheSize int +} + +// DefaultOptions is an Options object with default values. +// Bigcache provides option to give hash function however we are going with default it uses +// FNV 1a: https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash +// Key : email address/oid - Max length of email is 264 chars but 95% email length is 31 +// Value: result bool +// true means access allowed +// false means access denied +// We will tweak MaxEntrySize and MaxEntriesInWindows as per requirement and testing. +var DefaultOptions = Options{ + HardMaxCacheSize: maxCacheSizeInMB, + Shards: totalShards, + LifeWindow: ttlInMins * time.Minute, + CleanWindow: cleanupInMins * time.Minute, + MaxEntriesInWindow: maxEntriesInWin, + MaxEntrySize: maxEntrySize, + Verbose: false, +} + +// NewDataStore creates a BigCache store. 
+func NewDataStore(options Options) (authz.Store, error) { + config := bigcache.Config{ + Shards: options.Shards, + LifeWindow: options.LifeWindow, + CleanWindow: options.CleanWindow, + MaxEntriesInWindow: options.MaxEntriesInWindow, + MaxEntrySize: options.MaxEntriesInWindow, + Verbose: options.Verbose, + HardMaxCacheSize: options.HardMaxCacheSize, + OnRemove: nil, + OnRemoveWithReason: nil, + } + + cache, err := bigcache.NewBigCache(config) + if err != nil || cache == nil { + return nil, err + } + + return &DataStore{cache: cache}, nil +} diff --git a/authz/providers/azure/options.go b/authz/providers/azure/options.go new file mode 100644 index 000000000..180eb3bcd --- /dev/null +++ b/authz/providers/azure/options.go @@ -0,0 +1,124 @@ +/* +Copyright The Guard Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "fmt" + "strings" + + "github.com/appscode/guard/auth/providers/azure" + "github.com/pkg/errors" + "github.com/spf13/pflag" + apps "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + AKSAuthzMode = "aks" + ARCAuthzMode = "arc" + defaultArmCallLimit = 2000 + maxPermissibleArmCallLimit = 4000 +) + +type Options struct { + AuthzMode string + ResourceId string + AKSAuthzURL string + ARMCallLimit int + SkipAuthzCheck []string + AuthzResolveGroupMemberships bool + SkipAuthzForNonAADUsers bool +} + +func NewOptions() Options { + return Options{} +} + +func (o *Options) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.AuthzMode, "azure.authz-mode", "", "authz mode to call RBAC api, valid value is either aks or arc") + fs.StringVar(&o.ResourceId, "azure.resource-id", "", "azure cluster resource id (//subscription//resourcegroups//providers/Microsoft.ContainerService/managedClusters/ for AKS or //subscription//resourcegroups//providers/Microsoft.Kubernetes/connectedClusters/ for arc) to be used as scope for RBAC check") + fs.StringVar(&o.AKSAuthzURL, "azure.aks-authz-url", "", "url to call for AKS Authz flow") + fs.IntVar(&o.ARMCallLimit, "azure.arm-call-limit", defaultArmCallLimit, "No of calls before which webhook switch to new ARM instance to avoid throttling") + fs.StringSliceVar(&o.SkipAuthzCheck, "azure.skip-authz-check", []string{""}, "name of usernames/email for which authz check will be skipped") + fs.BoolVar(&o.AuthzResolveGroupMemberships, "azure.authz-resolve-group-memberships", o.AuthzResolveGroupMemberships, "set to true to resolve group membership by authorizer. 
Setting to false will use group list from subjectaccessreview request") + fs.BoolVar(&o.SkipAuthzForNonAADUsers, "azure.skip-authz-for-non-aad-users", o.SkipAuthzForNonAADUsers, "skip authz for non AAD users") +} + +func (o *Options) Validate(azure azure.Options) []error { + var errs []error + o.AuthzMode = strings.ToLower(o.AuthzMode) + switch o.AuthzMode { + case AKSAuthzMode: + case ARCAuthzMode: + case "": + default: + errs = append(errs, errors.New("invalid azure.authz-mode. valid value is either aks or arc")) + } + + if o.AuthzMode != "" && o.ResourceId == "" { + errs = append(errs, errors.New("azure.resource-id must be non-empty for authorization")) + } + + if o.AuthzMode == AKSAuthzMode && o.AKSAuthzURL == "" { + errs = append(errs, errors.New("azure.aks-authz-url must be non-empty")) + } + + if o.AuthzMode != AKSAuthzMode && o.AKSAuthzURL != "" { + errs = append(errs, errors.New("azure.aks-authz-url must be set only with AKS authz mode")) + } + + if o.AuthzMode == ARCAuthzMode { + if azure.ClientSecret == "" { + errs = append(errs, errors.New("azure.client-secret must be non-empty")) + } + if azure.ClientID == "" { + errs = append(errs, errors.New("azure.client-id must be non-empty")) + } + } + + if o.ARMCallLimit > maxPermissibleArmCallLimit { + errs = append(errs, errors.New("azure.arm-call-limit must not be more than 4000")) + } + + return errs +} + +func (o Options) Apply(d *apps.Deployment) (extraObjs []runtime.Object, err error) { + container := d.Spec.Template.Spec.Containers[0] + args := container.Args + switch o.AuthzMode { + case AKSAuthzMode: + fallthrough + case ARCAuthzMode: + args = append(args, fmt.Sprintf("--azure.authz-mode=%s", o.AuthzMode)) + args = append(args, fmt.Sprintf("--azure.resource-id=%s", o.ResourceId)) + args = append(args, fmt.Sprintf("--azure.arm-call-limit=%d", o.ARMCallLimit)) + } + + if o.AKSAuthzURL != "" { + args = append(args, fmt.Sprintf("--azure.aks-authz-url=%s", o.AKSAuthzURL)) + } + + if len(o.SkipAuthzCheck) > 0 
{ + args = append(args, fmt.Sprintf("--azure.skip-authz-check=%s", strings.Join(o.SkipAuthzCheck, ","))) + } + + args = append(args, fmt.Sprintf("--azure.authz-resolve-group-memberships=%t", o.AuthzResolveGroupMemberships)) + + args = append(args, fmt.Sprintf("--azure.skip-authz-for-non-aad-users=%t", o.SkipAuthzForNonAADUsers)) + + container.Args = args + d.Spec.Template.Spec.Containers[0] = container + return extraObjs, nil +} diff --git a/authz/providers/azure/rbac/checkaccessreqhelper.go b/authz/providers/azure/rbac/checkaccessreqhelper.go new file mode 100644 index 000000000..557400a33 --- /dev/null +++ b/authz/providers/azure/rbac/checkaccessreqhelper.go @@ -0,0 +1,338 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package rbac + +import ( + "encoding/json" + "path" + "strings" + + "github.com/golang/glog" + "github.com/google/uuid" + "github.com/pkg/errors" + authzv1 "k8s.io/api/authorization/v1" +) + +const ( + AccessAllowedVerdict = "Access allowed" + Allowed = "allowed" + AccessNotAllowedVerdict = "User does not have access to the resource in Azure. Update role assignment to allow access." + namespaces = "namespaces" + NotAllowedForNonAADUsers = "Access denied by Azure RBAC for non AAD users. Configure --azure.skip-authz-for-non-aad-users to enable access." + NoOpinionVerdict = "Azure does not have opinion for this user." 
+) + +type SubjectInfoAttributes struct { + ObjectId string `json:"ObjectId"` + Groups []string `json:"Groups,omitempty"` + RetrieveGroupMemberships bool `json:"xms-pasrp-retrievegroupmemberships"` +} + +type SubjectInfo struct { + Attributes SubjectInfoAttributes `json:"Attributes"` +} + +type AuthorizationEntity struct { + Id string `json:"Id"` +} + +type AuthorizationActionInfo struct { + AuthorizationEntity + IsDataAction bool `json:"IsDataAction"` +} + +type CheckAccessRequest struct { + Subject SubjectInfo `json:"Subject"` + Actions []AuthorizationActionInfo `json:"Actions"` + Resource AuthorizationEntity `json:"Resource"` +} + +type AccessDecision struct { + Decision string `json:"accessDecision"` +} + +type RoleAssignment struct { + Id string `json:"id"` + RoleDefinitionId string `json:"roleDefinitionId"` + PrincipalId string `json:"principalId"` + PrincipalType string `json:"principalType"` + Scope string `json:"scope"` + Condition string `json:"condition"` + ConditionVersion string `json:"conditionVersion"` + CanDelegate bool `json:"canDelegate"` +} + +type AzureRoleAssignment struct { + DelegatedManagedIdentityResourceId string `json:"delegatedManagedIdentityResourceId"` + RoleAssignment +} + +type Permission struct { + Actions []string `json:"actions,omitempty"` + NoActions []string `json:"noactions,omitempty"` + DataActions []string `json:"dataactions,omitempty"` + NoDataActions []string `json:"nodataactions,omitempty"` + Condition string `json:"condition"` + ConditionVersion string `json:"conditionVersion"` +} + +type Principal struct { + Id string `json:"id"` + Type string `json:"type"` +} + +type DenyAssignment struct { + Id string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Permission + Scope string `json:"scope"` + DoNotApplyToChildScopes bool `json:"doNotApplyToChildScopes"` + principals []Principal + excludeprincipals []Principal + Condition string `json:"condition"` + ConditionVersion string 
`json:"conditionVersion"` +} + +type AzureDenyAssignment struct { + MetaData map[string]interface{} `json:"metadata"` + IsSystemProtected string `json:"isSystemProtected"` + IsBuiltIn bool `json:isBuiltIn` + DenyAssignment +} + +type AuthorizationDecision struct { + Decision string `json:"accessDecision"` + ActionId string `json:"actionId"` + IsDataAction bool `json:"isDataAction"` + AzureRoleAssignment AzureRoleAssignment `json:"roleAssignment"` + AzureDenyAssignment AzureDenyAssignment `json:"denyAssignment"` + TimeToLiveInMs int `json:"timeToLiveInMs"` +} + +func getScope(resourceId string, attr *authzv1.ResourceAttributes) string { + if attr != nil && attr.Namespace != "" { + return path.Join(resourceId, namespaces, attr.Namespace) + } + return resourceId +} + +func isValidUUID(u string) bool { + _, err := uuid.Parse(u) + return err == nil +} + +func getValidSecurityGroups(groups []string) []string { + var finalGroups []string + for _, element := range groups { + if isValidUUID(element) { + finalGroups = append(finalGroups, element) + } + } + return finalGroups +} + +func getActionName(verb string) string { + /* special verbs + use verb on podsecuritypolicies resources in the policy API group + bind and escalate verbs on roles and clusterroles resources in the rbac.authorization.k8s.io API group + impersonate verb on users, groups, and serviceaccounts in the core API group + userextras in the authentication.k8s.io API group + + https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb + */ + switch verb { + case "get": + fallthrough + case "list": + fallthrough + case "watch": + return "read" + + case "bind": + return "bind/action" + case "escalate": + return "escalate/action" + case "use": + return "use/action" + case "impersonate": + return "impersonate/action" + + case "create": + fallthrough //instead of action create will be mapped to write + case "patch": + fallthrough + case "update": + return "write" + + case 
"delete": + fallthrough + case "deletecollection": // TODO: verify scenario + return "delete" + default: + return "" + } +} + +func getDataAction(subRevReq *authzv1.SubjectAccessReviewSpec, clusterType string) AuthorizationActionInfo { + authInfo := AuthorizationActionInfo{ + IsDataAction: true} + + authInfo.AuthorizationEntity.Id = clusterType + if subRevReq.ResourceAttributes != nil { + if subRevReq.ResourceAttributes.Group != "" { + authInfo.AuthorizationEntity.Id = path.Join(authInfo.AuthorizationEntity.Id, subRevReq.ResourceAttributes.Group) + } + authInfo.AuthorizationEntity.Id = path.Join(authInfo.AuthorizationEntity.Id, subRevReq.ResourceAttributes.Resource, getActionName(subRevReq.ResourceAttributes.Verb)) + } else if subRevReq.NonResourceAttributes != nil { + authInfo.AuthorizationEntity.Id = path.Join(authInfo.AuthorizationEntity.Id, subRevReq.NonResourceAttributes.Path, getActionName(subRevReq.NonResourceAttributes.Verb)) + } + return authInfo +} + +func getResultCacheKey(subRevReq *authzv1.SubjectAccessReviewSpec) string { + cacheKey := subRevReq.User + + if subRevReq.ResourceAttributes != nil { + if subRevReq.ResourceAttributes.Namespace != "" { + cacheKey = path.Join(cacheKey, subRevReq.ResourceAttributes.Namespace) + } + if subRevReq.ResourceAttributes.Group != "" { + cacheKey = path.Join(cacheKey, subRevReq.ResourceAttributes.Group) + } + cacheKey = path.Join(cacheKey, subRevReq.ResourceAttributes.Resource, getActionName(subRevReq.ResourceAttributes.Verb)) + } else if subRevReq.NonResourceAttributes != nil { + cacheKey = path.Join(cacheKey, subRevReq.NonResourceAttributes.Path, getActionName(subRevReq.NonResourceAttributes.Verb)) + } + + return cacheKey +} + +func prepareCheckAccessRequestBody(req *authzv1.SubjectAccessReviewSpec, clusterType, resourceId string, retrieveGroupMemberships bool) (*CheckAccessRequest, error) { + /* This is how sample SubjectAccessReview request will look like + { + "kind": "SubjectAccessReview", + "apiVersion": 
"authorization.k8s.io/v1beta1", + "metadata": { + "creationTimestamp": null + }, + "spec": { + "resourceAttributes": { + "namespace": "default", + "verb": "get", + "group": "extensions", + "version": "v1beta1", + "resource": "deployments", + "name": "obo-deploy" + }, + "user": "user@contoso.com", + "extra": { + "oid": [ + "62103f2e-051d-48cc-af47-b1ff3deec630" + ] + } + }, + "status": { + "allowed": false + } + } + + For check access it will be converted into following request for arc cluster: + { + "Subject": { + "Attributes": { + "ObjectId": "62103f2e-051d-48cc-af47-b1ff3deec630", + "xms-pasrp-retrievegroupmemberships": true + } + }, + "Actions": [ + { + "Id": "Microsoft.Kubernetes/connectedClusters/extensions/deployments/read", + "IsDataAction": true + } + ], + "Resource": { + "Id": "/namespaces/" + } + } + */ + checkaccessreq := CheckAccessRequest{} + var userOid string + if oid, ok := req.Extra["oid"]; ok { + val := oid.String() + userOid = val[1 : len(val)-1] + } else { + return nil, errors.New("oid info not sent from authentication module") + } + + if isValidUUID(userOid) { + checkaccessreq.Subject.Attributes.ObjectId = userOid + } else { + return nil, errors.New("oid info sent from authentication module is not valid") + } + + if !retrieveGroupMemberships { + groups := getValidSecurityGroups(req.Groups) + checkaccessreq.Subject.Attributes.Groups = groups + } + + checkaccessreq.Subject.Attributes.RetrieveGroupMemberships = retrieveGroupMemberships + action := make([]AuthorizationActionInfo, 1) + action[0] = getDataAction(req, clusterType) + checkaccessreq.Actions = action + checkaccessreq.Resource.Id = getScope(resourceId, req.ResourceAttributes) + + return &checkaccessreq, nil +} + +func getNameSpaceScope(req *authzv1.SubjectAccessReviewSpec) (bool, string) { + var namespace string = "" + if req.ResourceAttributes != nil && req.ResourceAttributes.Namespace != "" { + namespace = path.Join(namespaces, req.ResourceAttributes.Namespace) + return true, namespace 
+ } + return false, namespace +} + +func ConvertCheckAccessResponse(body []byte) (*authzv1.SubjectAccessReviewStatus, error) { + var ( + response []AuthorizationDecision + allowed bool + denied bool + verdict string + ) + err := json.Unmarshal(body, &response) + if err != nil { + glog.V(10).Infof("Failed to parse checkacccess response. Error:%s", err.Error()) + return nil, errors.Wrap(err, "Error in unmarshalling check access response.") + } + + if glog.V(10) { + binaryData, _ := json.MarshalIndent(response, "", " ") + glog.Infof("check access response:%s", binaryData) + } + + if strings.ToLower(response[0].Decision) == Allowed { + allowed = true + verdict = AccessAllowedVerdict + } else { + allowed = false + denied = true + verdict = AccessNotAllowedVerdict + } + + return &authzv1.SubjectAccessReviewStatus{Allowed: allowed, Reason: verdict, Denied: denied}, nil +} diff --git a/authz/providers/azure/rbac/checkaccessreqhelper_test.go b/authz/providers/azure/rbac/checkaccessreqhelper_test.go new file mode 100644 index 000000000..4f4b3efd3 --- /dev/null +++ b/authz/providers/azure/rbac/checkaccessreqhelper_test.go @@ -0,0 +1,258 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rbac + +import ( + "reflect" + "testing" + + "github.com/pkg/errors" + authzv1 "k8s.io/api/authorization/v1" +) + +func Test_getScope(t *testing.T) { + type args struct { + resourceId string + attr *authzv1.ResourceAttributes + } + tests := []struct { + name string + args args + want string + }{ + {"nilAttr", args{"resourceId", nil}, "resourceId"}, + {"bothnil", args{"", nil}, ""}, + {"emptyRes", args{"", &authzv1.ResourceAttributes{Namespace: ""}}, ""}, + {"emptyNS", args{"resourceId", &authzv1.ResourceAttributes{Namespace: ""}}, "resourceId"}, + {"bothPresent", args{"resourceId", &authzv1.ResourceAttributes{Namespace: "test"}}, "resourceId/namespaces/test"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getScope(tt.args.resourceId, tt.args.attr); got != tt.want { + t.Errorf("getScope() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getValidSecurityGroups(t *testing.T) { + type args struct { + groups []string + } + tests := []struct { + name string + args args + want []string + }{ + {"nilGroup", args{nil}, nil}, + {"emptyGroup", args{[]string{}}, nil}, + {"noGuidGroup", args{[]string{"abc", "def", "system:ghi"}}, nil}, + {"someGroup", + args{[]string{"abc", "1cffe3ae-93c0-4a87-9484-2e90e682aae9", "sys:admin", "", "0ab7f20f-8e9a-43ba-b5ac-1811c91b3d40"}}, + []string{"1cffe3ae-93c0-4a87-9484-2e90e682aae9", "0ab7f20f-8e9a-43ba-b5ac-1811c91b3d40"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getValidSecurityGroups(tt.args.groups); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getValidSecurityGroups() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getDataAction(t *testing.T) { + type args struct { + subRevReq *authzv1.SubjectAccessReviewSpec + clusterType string + } + tests := []struct { + name string + args args + want AuthorizationActionInfo + }{ + {"aks", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + NonResourceAttributes: 
&authzv1.NonResourceAttributes{Path: "/apis", Verb: "list"}}, clusterType: "aks"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "aks/apis/read"}, IsDataAction: true}}, + + {"aks2", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + NonResourceAttributes: &authzv1.NonResourceAttributes{Path: "/logs", Verb: "get"}}, clusterType: "aks"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "aks/logs/read"}, IsDataAction: true}}, + + {"arc", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "", Resource: "pods", Subresource: "status", Version: "v1", Name: "test", Verb: "delete"}}, clusterType: "arc"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "arc/pods/delete"}, IsDataAction: true}}, + + {"arc2", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "apps", Resource: "deployments", Subresource: "status", Version: "v1", Name: "test", Verb: "create"}}, clusterType: "arc"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "arc/apps/deployments/write"}, IsDataAction: true}}, + + {"arc3", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "policy", Resource: "podsecuritypolicies", Subresource: "status", Version: "v1", Name: "test", Verb: "use"}}, clusterType: "arc"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "arc/policy/podsecuritypolicies/use/action"}, IsDataAction: true}}, + + {"aks3", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "authentication.k8s.io", Resource: "userextras", Subresource: "scopes", Version: "v1", Name: "test", Verb: "impersonate"}}, clusterType: "aks"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "aks/authentication.k8s.io/userextras/impersonate/action"}, 
IsDataAction: true}}, + + {"arc4", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "rbac.authorization.k8s.io", Resource: "clusterroles", Subresource: "status", Version: "v1", Name: "test", Verb: "bind"}}, clusterType: "arc"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "arc/rbac.authorization.k8s.io/clusterroles/bind/action"}, IsDataAction: true}}, + + {"aks4", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "rbac.authorization.k8s.io", Resource: "clusterroles", Subresource: "status", Version: "v1", Name: "test", Verb: "escalate"}}, clusterType: "aks"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "aks/rbac.authorization.k8s.io/clusterroles/escalate/action"}, IsDataAction: true}}, + + {"arc5", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "scheduling.k8s.io", Resource: "priorityclasses", Subresource: "status", Version: "v1", Name: "test", Verb: "update"}}, clusterType: "arc"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "arc/scheduling.k8s.io/priorityclasses/write"}, IsDataAction: true}}, + + {"aks5", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "events.k8s.io", Resource: "events", Subresource: "status", Version: "v1", Name: "test", Verb: "watch"}}, clusterType: "aks"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "aks/events.k8s.io/events/read"}, IsDataAction: true}}, + + {"arc6", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "batch", Resource: "cronjobs", Subresource: "status", Version: "v1", Name: "test", Verb: "patch"}}, clusterType: "arc"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "arc/batch/cronjobs/write"}, 
IsDataAction: true}}, + + {"aks6", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Group: "certificates.k8s.io", Resource: "certificatesigningrequests", Subresource: "approvals", Version: "v1", Name: "test", Verb: "deletecollection"}}, clusterType: "aks"}, + AuthorizationActionInfo{AuthorizationEntity: AuthorizationEntity{Id: "aks/certificates.k8s.io/certificatesigningrequests/delete"}, IsDataAction: true}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getDataAction(tt.args.subRevReq, tt.args.clusterType); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getDataAction() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getNameSpaceScope(t *testing.T) { + req := authzv1.SubjectAccessReviewSpec{ResourceAttributes: nil} + want := false + got, str := getNameSpaceScope(&req) + if got || str != "" { + t.Errorf("Want:%v, got:%v", want, got) + } + + req = authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: ""}} + want = false + got, str = getNameSpaceScope(&req) + if got || str != "" { + t.Errorf("Want:%v, got:%v", want, got) + } + + req = authzv1.SubjectAccessReviewSpec{ + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: "dev"}} + outputstring := "namespaces/dev" + want = true + got, str = getNameSpaceScope(&req) + if !got || str != outputstring { + t.Errorf("Want:%v - %s, got: %v - %s", want, outputstring, got, str) + } +} + +func Test_prepareCheckAccessRequestBody(t *testing.T) { + req := &authzv1.SubjectAccessReviewSpec{Extra: nil} + resouceId := "resourceId" + clusterType := "aks" + var want *CheckAccessRequest = nil + wantErr := errors.New("oid info not sent from authenticatoin module") + + got, gotErr := prepareCheckAccessRequestBody(req, clusterType, resouceId, true) + + if got != want && gotErr != wantErr { + t.Errorf("Want:%v WantErr:%v, got:%v, gotErr:%v", want, wantErr, got, gotErr) + } + + req = 
&authzv1.SubjectAccessReviewSpec{Extra: map[string]authzv1.ExtraValue{"oid": {"test"}}} + resouceId = "resourceId" + clusterType = "arc" + want = nil + wantErr = errors.New("oid info sent from authenticatoin module is not valid") + + got, gotErr = prepareCheckAccessRequestBody(req, clusterType, resouceId, true) + + if got != want && gotErr != wantErr { + t.Errorf("Want:%v WantErr:%v, got:%v, gotErr:%v", want, wantErr, got, gotErr) + } +} + +func Test_getResultCacheKey(t *testing.T) { + type args struct { + subRevReq *authzv1.SubjectAccessReviewSpec + } + tests := []struct { + name string + args args + want string + }{ + {"aks", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + User: "charlie@yahoo.com", + NonResourceAttributes: &authzv1.NonResourceAttributes{Path: "/apis/v1", Verb: "list"}}}, + "charlie@yahoo.com/apis/v1/read"}, + + {"aks", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + User: "echo@outlook.com", + NonResourceAttributes: &authzv1.NonResourceAttributes{Path: "/logs", Verb: "get"}}}, + "echo@outlook.com/logs/read"}, + + {"aks", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + User: "alpha@bing.com", + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: "dev", Group: "", Resource: "pods", + Subresource: "status", Version: "v1", Name: "test", Verb: "delete"}}}, + "alpha@bing.com/dev/pods/delete"}, + + {"arc", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + User: "beta@msn.com", + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: "azure-arc", + Group: "authentication.k8s.io", Resource: "userextras", Subresource: "scopes", Version: "v1", + Name: "test", Verb: "impersonate"}}}, + "beta@msn.com/azure-arc/authentication.k8s.io/userextras/impersonate/action"}, + + {"arc", args{ + subRevReq: &authzv1.SubjectAccessReviewSpec{ + User: "beta@msn.com", + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: "", Group: "", Resource: "nodes", + Subresource: "scopes", Version: "v1", Name: "", Verb: "list"}}}, + 
"beta@msn.com/nodes/read"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getResultCacheKey(tt.args.subRevReq); got != tt.want { + t.Errorf("getResultCacheKey() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/authz/providers/azure/rbac/rbac.go b/authz/providers/azure/rbac/rbac.go new file mode 100644 index 000000000..9bad74175 --- /dev/null +++ b/authz/providers/azure/rbac/rbac.go @@ -0,0 +1,258 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package rbac + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/appscode/guard/auth/providers/azure/graph" + "github.com/appscode/guard/authz" + "github.com/golang/glog" + "github.com/moul/http2curl" + "github.com/pkg/errors" + authzv1 "k8s.io/api/authorization/v1" +) + +const ( + managedClusters = "Microsoft.ContainerService/managedClusters" + connectedClusters = "Microsoft.Kubernetes/connectedClusters" + checkAccessPath = "/providers/Microsoft.Authorization/checkaccess" + checkAccessAPIVersion = "2018-09-01-preview" + remaingSubReadARMHeader = "x-ms-ratelimit-remaining-subscription-reads" + expiryDelta = 60 * time.Second +) + +type void struct{} + +// AccessInfo allows you to check user access from MS RBAC +type AccessInfo struct { + headers http.Header + client *http.Client + expiresAt time.Time + // These allow us to mock out the URL for testing + apiURL *url.URL + + tokenProvider graph.TokenProvider + clusterType string + azureResourceId string + armCallLimit int + dataStore authz.Store + skipCheck map[string]void + retrieveGroupMemberships bool + skipAuthzForNonAADUsers bool +} + +func newAccessInfo(tokenProvider graph.TokenProvider, rbacURL *url.URL, clsuterType, resourceId string, armCallLimit int, dataStore authz.Store, skipList []string, retrieveGroupMemberships, skipAuthzForNonAADUsers bool) (*AccessInfo, error) { + u := &AccessInfo{ + client: http.DefaultClient, + headers: http.Header{ + "Content-Type": []string{"application/json"}, + }, + apiURL: rbacURL, + tokenProvider: tokenProvider, + azureResourceId: resourceId, + armCallLimit: armCallLimit, + dataStore: dataStore, + retrieveGroupMemberships: retrieveGroupMemberships, + skipAuthzForNonAADUsers: skipAuthzForNonAADUsers} + + u.skipCheck = make(map[string]void, len(skipList)) + var member void + for _, s := range skipList { + u.skipCheck[strings.ToLower(s)] = member + } + + if clsuterType == "arc" { + 
u.clusterType = connectedClusters + } + + if clsuterType == "aks" { + u.clusterType = managedClusters + } + + return u, nil +} + +func New(clientID, clientSecret, tenantID, aadEndpoint, armEndPoint, clusterType, resourceId string, armCallLimit int, dataStore authz.Store, skipCheck []string, retrieveGroupMemberships, skipAuthzForNonAADUsers bool) (*AccessInfo, error) { + rbacURL, err := url.Parse(armEndPoint) + + if err != nil { + return nil, err + } + + tokenProvider := graph.NewClientCredentialTokenProvider(clientID, clientSecret, + fmt.Sprintf("%s%s/oauth2/v2.0/token", aadEndpoint, tenantID), + fmt.Sprintf("%s.default", armEndPoint)) + + return newAccessInfo(tokenProvider, rbacURL, clusterType, resourceId, armCallLimit, dataStore, skipCheck, retrieveGroupMemberships, skipAuthzForNonAADUsers) +} + +func NewWithAKS(tokenURL, tenantID, armEndPoint, clusterType, resourceId string, armCallLimit int, dataStore authz.Store, skipCheck []string, retrieveGroupMemberships, skipAuthzForNonAADUsers bool) (*AccessInfo, error) { + rbacURL, err := url.Parse(armEndPoint) + + if err != nil { + return nil, err + } + tokenProvider := graph.NewAKSTokenProvider(tokenURL, tenantID) + + return newAccessInfo(tokenProvider, rbacURL, clusterType, resourceId, armCallLimit, dataStore, skipCheck, retrieveGroupMemberships, skipAuthzForNonAADUsers) +} + +func (a *AccessInfo) RefreshToken() error { + resp, err := a.tokenProvider.Acquire("") + if err != nil { + glog.Errorf("%s failed to refresh token : %s", a.tokenProvider.Name(), err.Error()) + return errors.Wrap(err, "failed to refresh rbac token") + } + + // Set the authorization headers for future requests + a.headers.Set("Authorization", fmt.Sprintf("Bearer %s", resp.Token)) + expIn := time.Duration(resp.Expires) * time.Second + a.expiresAt = time.Now().Add(expIn - expiryDelta) + + return nil +} + +func (a *AccessInfo) IsTokenExpired() bool { + if a.expiresAt.Before(time.Now()) { + return true + } else { + return false + } +} + +func (a 
*AccessInfo) ShouldSkipAuthzCheckForNonAADUsers() bool { + return a.skipAuthzForNonAADUsers +} + +func (a *AccessInfo) GetResultFromCache(request *authzv1.SubjectAccessReviewSpec) (bool, bool) { + var result bool + key := getResultCacheKey(request) + glog.V(10).Infof("Cache search for key: %s", key) + found, _ := a.dataStore.Get(key, &result) + return found, result +} + +func (a *AccessInfo) SkipAuthzCheck(request *authzv1.SubjectAccessReviewSpec) bool { + if a.clusterType == connectedClusters { + _, ok := a.skipCheck[strings.ToLower(request.User)] + return ok + } + return false +} + +func (a *AccessInfo) SetResultInCache(request *authzv1.SubjectAccessReviewSpec, result bool) error { + key := getResultCacheKey(request) + glog.V(10).Infof("Cache set for key: %s, value: %t", key, result) + return a.dataStore.Set(key, result) +} + +func (a *AccessInfo) CheckAccess(request *authzv1.SubjectAccessReviewSpec) (*authzv1.SubjectAccessReviewStatus, error) { + checkAccessBody, err := prepareCheckAccessRequestBody(request, a.clusterType, a.azureResourceId, a.retrieveGroupMemberships) + + if err != nil { + return nil, errors.Wrap(err, "error in preparing check access request") + } + + checkAccessURL := *a.apiURL + // Append the path for azure cluster resource id + checkAccessURL.Path = path.Join(checkAccessURL.Path, a.azureResourceId) + exist, nameSpaceString := getNameSpaceScope(request) + if exist { + checkAccessURL.Path = path.Join(checkAccessURL.Path, nameSpaceString) + } + + checkAccessURL.Path = path.Join(checkAccessURL.Path, checkAccessPath) + params := url.Values{} + params.Add("api-version", checkAccessAPIVersion) + checkAccessURL.RawQuery = params.Encode() + + buf := new(bytes.Buffer) + if err := json.NewEncoder(buf).Encode(checkAccessBody); err != nil { + return nil, errors.Wrap(err, "error encoding check access request") + } + + if glog.V(10) { + binaryData, _ := json.MarshalIndent(checkAccessBody, "", " ") + glog.V(10).Infof("checkAccessURI:%s", 
checkAccessURL.String()) + glog.V(10).Infof("binary data:%s", binaryData) + } + + req, err := http.NewRequest(http.MethodPost, checkAccessURL.String(), buf) + if err != nil { + return nil, errors.Wrap(err, "error creating check access request") + } + // Set the auth headers for the request + req.Header = a.headers + + if glog.V(10) { + cmd, _ := http2curl.GetCurlCommand(req) + glog.V(10).Infoln(cmd) + } + + resp, err := a.client.Do(req) + if err != nil { + return nil, errors.Wrap(err, "error in check access request execution") + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "error in reading response body") + } + + defer resp.Body.Close() + glog.V(10).Infof("checkaccess response: %s, Configured ARM call limit: %d", string(data), a.armCallLimit) + if resp.StatusCode != http.StatusOK { + glog.Errorf("error in check access response. error code: %d, response: %s", resp.StatusCode, string(data)) + if resp.StatusCode == http.StatusTooManyRequests { + glog.V(10).Infoln("Closing idle TCP connections.") + a.client.CloseIdleConnections() + // TODO: add prom metrics for this scenario + } + return nil, errors.Errorf("request %s failed with status code: %d and response: %s", req.URL.Path, resp.StatusCode, string(data)) + } else { + remaining := resp.Header.Get(remaingSubReadARMHeader) + glog.Infof("Remaining request count in ARM instance:%s", remaining) + count, _ := strconv.Atoi(remaining) + if count < a.armCallLimit { + if glog.V(10) { + glog.V(10).Infoln("Closing idle TCP connections.") + } + // Usually ARM connections are cached by destinatio ip and port + // By closing the idle connection, a new request will use different port which + // will connect to different ARM instance of the region to ensure there is no ARM throttling + a.client.CloseIdleConnections() + } + } + + // Decode response and prepare k8s response + response, err := ConvertCheckAccessResponse(data) + if err == nil { + a.SetResultInCache(request, 
response.Allowed) + } else { + a.SetResultInCache(request, false) + } + return response, err +} diff --git a/authz/providers/azure/rbac/rbac_test.go b/authz/providers/azure/rbac/rbac_test.go new file mode 100644 index 000000000..17bdfafbb --- /dev/null +++ b/authz/providers/azure/rbac/rbac_test.go @@ -0,0 +1,206 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package rbac + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/appscode/guard/auth/providers/azure/graph" + "github.com/appscode/guard/authz" + "github.com/appscode/guard/authz/providers/azure/data" + "github.com/stretchr/testify/assert" + authzv1 "k8s.io/api/authorization/v1" +) + +func getAPIServerAndAccessInfo(returnCode int, body, clusterType, resourceId string, options data.Options) (*httptest.Server, *AccessInfo, authz.Store) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(returnCode) + _, _ = w.Write([]byte(body)) + })) + apiURL, _ := url.Parse(ts.URL) + datastore, _ := data.NewDataStore(options) + u := &AccessInfo{ + client: http.DefaultClient, + apiURL: apiURL, + headers: http.Header{}, + expiresAt: time.Now().Add(time.Hour), + clusterType: clusterType, + azureResourceId: resourceId, + armCallLimit: 0, + dataStore: datastore} + return ts, u, datastore +} + +func TestCheckAccess(t *testing.T) { + t.Run("successful request", func(t *testing.T) { + var validBody = 
`[{"accessDecision":"Allowed", + "actionId":"Microsoft.Kubernetes/connectedClusters/pods/delete", + "isDataAction":true,"roleAssignment":null,"denyAssignment":null,"timeToLiveInMs":300000}]` + + var testOptions = data.Options{ + HardMaxCacheSize: 1, + Shards: 1, + LifeWindow: 1 * time.Minute, + CleanWindow: 1 * time.Minute, + MaxEntriesInWindow: 10, + MaxEntrySize: 5, + Verbose: false, + } + + ts, u, store := getAPIServerAndAccessInfo(http.StatusOK, validBody, "arc", "resourceid", testOptions) + defer ts.Close() + defer store.Close() + + request := &authzv1.SubjectAccessReviewSpec{ + User: "alpha@bing.com", + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: "dev", Group: "", Resource: "pods", + Subresource: "status", Version: "v1", Name: "test", Verb: "delete"}, Extra: map[string]authzv1.ExtraValue{"oid": {"00000000-0000-0000-0000-000000000000"}}} + + response, err := u.CheckAccess(request) + + assert.Nilf(t, err, "Should not have got error") + assert.NotNil(t, response) + assert.Equal(t, response.Allowed, true) + assert.Equal(t, response.Denied, false) + }) + + t.Run("too many requests", func(t *testing.T) { + var validBody = `""` + + var testOptions = data.Options{ + HardMaxCacheSize: 1, + Shards: 1, + LifeWindow: 1 * time.Minute, + CleanWindow: 1 * time.Minute, + MaxEntriesInWindow: 10, + MaxEntrySize: 5, + Verbose: false, + } + + ts, u, store := getAPIServerAndAccessInfo(http.StatusTooManyRequests, validBody, "arc", "resourceid", testOptions) + defer ts.Close() + defer store.Close() + + request := &authzv1.SubjectAccessReviewSpec{ + User: "alpha@bing.com", + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: "dev", Group: "", Resource: "pods", + Subresource: "status", Version: "v1", Name: "test", Verb: "delete"}, Extra: map[string]authzv1.ExtraValue{"oid": {"00000000-0000-0000-0000-000000000000"}}} + + response, err := u.CheckAccess(request) + + assert.Nilf(t, response, "response should be nil") + assert.NotNilf(t, err, "should get error") + 
}) + + t.Run("check acess not available", func(t *testing.T) { + var validBody = `""` + + var testOptions = data.Options{ + HardMaxCacheSize: 1, + Shards: 1, + LifeWindow: 1 * time.Minute, + CleanWindow: 1 * time.Minute, + MaxEntriesInWindow: 10, + MaxEntrySize: 5, + Verbose: false, + } + + ts, u, store := getAPIServerAndAccessInfo(http.StatusInternalServerError, validBody, + "arc", "resourceid", testOptions) + defer ts.Close() + defer store.Close() + + request := &authzv1.SubjectAccessReviewSpec{ + User: "alpha@bing.com", + ResourceAttributes: &authzv1.ResourceAttributes{Namespace: "dev", Group: "", Resource: "pods", + Subresource: "status", Version: "v1", Name: "test", Verb: "delete"}, Extra: map[string]authzv1.ExtraValue{"oid": {"00000000-0000-0000-0000-000000000000"}}} + + response, err := u.CheckAccess(request) + + assert.Nilf(t, response, "response should be nil") + assert.NotNilf(t, err, "should get error") + }) +} + +func getAuthServerAndAccessInfo(returnCode int, body, clientID, clientSecret string) (*httptest.Server, *AccessInfo) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(returnCode) + _, _ = w.Write([]byte(body)) + })) + u := &AccessInfo{ + client: http.DefaultClient, + headers: http.Header{}, + } + u.tokenProvider = graph.NewClientCredentialTokenProvider(clientID, clientSecret, ts.URL, "") + return ts, u +} + +func TestLogin(t *testing.T) { + t.Run("successful login", func(t *testing.T) { + var validToken = "blackbriar" + var validBody = `{ + "token_type": "Bearer", + "expires_in": 3599, + "access_token": "%s" + }` + ts, u := getAuthServerAndAccessInfo(http.StatusOK, fmt.Sprintf(validBody, validToken), "jason", "bourne") + defer ts.Close() + + err := u.RefreshToken() + if err != nil { + t.Errorf("Error when trying to log in: %s", err) + } + if u.headers.Get("Authorization") != fmt.Sprintf("Bearer %s", validToken) { + t.Errorf("Authorization header should be set. Expected: %q. 
Got: %q", fmt.Sprintf("Bearer %s", validToken), u.headers.Get("Authorization")) + } + if !time.Now().Before(u.expiresAt) { + t.Errorf("Expiry not set properly. Expected it to be after the current time. Actual: %v", u.expiresAt) + } + }) + + t.Run("unsuccessful login", func(t *testing.T) { + ts, u := getAuthServerAndAccessInfo(http.StatusUnauthorized, "Unauthorized", "CIA", "treadstone") + defer ts.Close() + + err := u.RefreshToken() + assert.NotNilf(t, err, "Should have gotten error") + }) + + t.Run("request error", func(t *testing.T) { + badURL := "https://127.0.0.1:34567" + u := &AccessInfo{ + client: http.DefaultClient, + headers: http.Header{}, + } + u.tokenProvider = graph.NewClientCredentialTokenProvider("CIA", "outcome", badURL, "") + + err := u.RefreshToken() + assert.NotNilf(t, err, "Should have gotten error") + }) + + t.Run("bad response body", func(t *testing.T) { + ts, u := getAuthServerAndAccessInfo(http.StatusOK, "{bad_json", "CIA", "treadstone") + defer ts.Close() + + err := u.RefreshToken() + assert.NotNilf(t, err, "Should have gotten error") + }) +} diff --git a/authz/providers/providers.go b/authz/providers/providers.go new file mode 100644 index 000000000..aaf11a6b5 --- /dev/null +++ b/authz/providers/providers.go @@ -0,0 +1,66 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package providers + +import ( + "fmt" + "strings" + + "github.com/appscode/guard/authz" + _ "github.com/appscode/guard/authz/providers/azure" + + "github.com/pkg/errors" + "github.com/spf13/pflag" + apps "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +type AuthzProviders struct { + Providers []string // contains providers name for which guard will provide service, required +} + +func (a *AuthzProviders) AddFlags(fs *pflag.FlagSet) { + fs.StringSliceVar(&a.Providers, "authz-providers", a.Providers, fmt.Sprintf("name of providers for which guard will provide authorization service, supported providers : %v", authz.SupportedOrgs.String())) +} + +func (a *AuthzProviders) Validate() []error { + var errs []error + + for _, p := range a.Providers { + if !authz.SupportedOrgs.Has(p) { + errs = append(errs, errors.Errorf("provider %s not supported", p)) + } + } + return errs +} + +func (a *AuthzProviders) Apply(d *apps.Deployment) (extraObjs []runtime.Object, err error) { + if len(a.Providers) > 0 { + d.Spec.Template.Spec.Containers[0].Args = append(d.Spec.Template.Spec.Containers[0].Args, fmt.Sprintf("--authz-providers=%s", strings.Join(a.Providers, ","))) + } + + return nil, nil +} + +func (a *AuthzProviders) Has(name string) bool { + name = strings.TrimSpace(name) + for _, p := range a.Providers { + if strings.EqualFold(p, name) { + return true + } + } + return false +} diff --git a/authz/types.go b/authz/types.go new file mode 100644 index 000000000..45ac8437e --- /dev/null +++ b/authz/types.go @@ -0,0 +1,57 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package authz + +import ( + "sort" + "strings" + + authzv1 "k8s.io/api/authorization/v1" +) + +type orgs []string + +var SupportedOrgs orgs + +func (o orgs) Has(name string) bool { + name = strings.TrimSpace(strings.ToLower(name)) + for _, org := range o { + if org == name { + return true + } + } + return false +} + +func (o orgs) String() string { + names := make([]string, len(o)) + for i, org := range o { + names[i] = strings.Title(org) + } + sort.Strings(names) + return strings.Join(names, "/") +} + +type Interface interface { + Check(request *authzv1.SubjectAccessReviewSpec) (*authzv1.SubjectAccessReviewStatus, error) +} + +type Store interface { + Set(key string, value interface{}) error + Get(key string, value interface{}) (bool, error) + Delete(key string) error + Close() error +} diff --git a/commands/installer.go b/commands/installer.go index d600a4f8d..d8b59f142 100644 --- a/commands/installer.go +++ b/commands/installer.go @@ -26,24 +26,32 @@ import ( ) func NewCmdInstaller() *cobra.Command { - opts := installer.New() + authopts := installer.NewAuthOptions() + authzopts := installer.NewAuthzOptions() + cmd := &cobra.Command{ Use: "installer", Short: "Prints Kubernetes objects for deploying guard server", DisableAutoGenTag: true, Run: func(cmd *cobra.Command, args []string) { - errs := opts.Validate() + errs := authopts.Validate() + if errs != nil { + glog.Fatal(errs) + } + + errs = authzopts.Validate(&authopts) if errs != nil { glog.Fatal(errs) } - data, err := installer.Generate(opts) + data, err := installer.Generate(authopts, authzopts) if err != nil { 
glog.Fatal(err) } fmt.Println(string(data)) }, } - opts.AddFlags(cmd.Flags()) + authopts.AddFlags(cmd.Flags()) + authzopts.AddFlags(cmd.Flags()) return cmd } diff --git a/commands/run.go b/commands/run.go index 388609d87..8e5bb0076 100644 --- a/commands/run.go +++ b/commands/run.go @@ -26,9 +26,11 @@ import ( ) func NewCmdRun() *cobra.Command { - o := server.NewRecommendedOptions() + o := server.NewAuthRecommendedOptions() + ao := server.NewAuthzRecommendedOptions() srv := server.Server{ - RecommendedOptions: o, + AuthRecommendedOptions: o, + AuthzRecommendedOptions: ao, } cmd := &cobra.Command{ Use: "run", @@ -38,7 +40,7 @@ func NewCmdRun() *cobra.Command { cli.SendPeriodicAnalytics(c, v.Version.Version) }, Run: func(cmd *cobra.Command, args []string) { - if !srv.RecommendedOptions.SecureServing.UseTLS() { + if !srv.AuthRecommendedOptions.SecureServing.UseTLS() { glog.Fatalln("Guard server must use SSL.") } srv.ListenAndServe() diff --git a/commands/webhok_config.go b/commands/webhok_config.go index a86ad15a6..cf41ce956 100644 --- a/commands/webhok_config.go +++ b/commands/webhok_config.go @@ -37,6 +37,7 @@ func NewCmdGetWebhookConfig() *cobra.Command { rootDir = auth.DefaultDataDir org string addr string + mode string ) cmd := &cobra.Command{ Use: "webhook-config", @@ -92,39 +93,73 @@ func NewCmdGetWebhookConfig() *cobra.Command { glog.Fatalf("Failed to load client certificate. 
Reason: %v.", err) } - config := clientcmdapi.Config{ - Kind: "Config", - APIVersion: "v1", - Clusters: map[string]*clientcmdapi.Cluster{ - "guard-server": { - Server: fmt.Sprintf("https://%s/tokenreviews", addr), - CertificateAuthorityData: caCert, + if mode == "auth" { + config := clientcmdapi.Config{ + Kind: "Config", + APIVersion: "v1", + Clusters: map[string]*clientcmdapi.Cluster{ + "guard-server": { + Server: fmt.Sprintf("https://%s/tokenreviews", addr), + CertificateAuthorityData: caCert, + }, }, - }, - AuthInfos: map[string]*clientcmdapi.AuthInfo{ - filename(cfg): { - ClientCertificateData: clientCert, - ClientKeyData: clientKey, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + filename(cfg): { + ClientCertificateData: clientCert, + ClientKeyData: clientKey, + }, }, - }, - Contexts: map[string]*clientcmdapi.Context{ - "webhook": { - Cluster: "guard-server", - AuthInfo: filename(cfg), + Contexts: map[string]*clientcmdapi.Context{ + "webhook": { + Cluster: "guard-server", + AuthInfo: filename(cfg), + }, }, - }, - CurrentContext: "webhook", + CurrentContext: "webhook", + } + data, err := clientcmd.Write(config) + if err != nil { + glog.Fatalln(err) + } + fmt.Println(string(data)) } - data, err := clientcmd.Write(config) - if err != nil { - glog.Fatalln(err) + + if mode == "authz" { + config := clientcmdapi.Config{ + Kind: "Config", + APIVersion: "v1", + Clusters: map[string]*clientcmdapi.Cluster{ + "guard-server": { + Server: fmt.Sprintf("https://%s/subjectaccessreviews", addr), + CertificateAuthorityData: caCert, + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + filename(cfg): { + ClientCertificateData: clientCert, + ClientKeyData: clientKey, + }, + }, + Contexts: map[string]*clientcmdapi.Context{ + "webhook": { + Cluster: "guard-server", + AuthInfo: filename(cfg), + }, + }, + CurrentContext: "webhook", + } + data, err := clientcmd.Write(config) + if err != nil { + glog.Fatalln(err) + } + fmt.Println(string(data)) } - fmt.Println(string(data)) }, } 
cmd.Flags().StringVar(&rootDir, "pki-dir", rootDir, "Path to directory where pki files are stored.") cmd.Flags().StringVarP(&org, "organization", "o", org, fmt.Sprintf("Name of Organization (%v).", auth.SupportedOrgs)) cmd.Flags().StringVar(&addr, "addr", "10.96.10.96:443", "Address (host:port) of guard server.") + cmd.Flags().StringVar(&mode, "mode", "auth", "Mode to generate config, Supported mode: auth, authz") return cmd } diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index b79d7214d..86ff5702d 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -17,6 +17,14 @@ aliases: # Change Log +## [v0.5.0](https://github.com/appscode/guard/tree/v0.5.0) (2020-05-11) +[Full Changelog](https://github.com/appscode/guard/compare/v0.5.0-rc.1...v0.5.0) + +**Merged pull requests:** + +- Use Go 1.14.2 [\#251](https://github.com/appscode/guard/pull/251) ([tamalsaha](https://github.com/tamalsaha)) +- Azure: switch to new graph endpoint for US Government cloud [\#250](https://github.com/appscode/guard/pull/250) ([karataliu](https://github.com/karataliu)) + ## [v0.5.0-rc.1](https://github.com/appscode/guard/tree/v0.5.0-rc.1) (2020-02-16) [Full Changelog](https://github.com/appscode/guard/compare/v0.5.0-rc.0...v0.5.0-rc.1) diff --git a/go.mod b/go.mod index 3621147ad..dfd51c994 100644 --- a/go.mod +++ b/go.mod @@ -8,44 +8,31 @@ require ( github.com/appscode/pat v0.0.0-20170521084856-48ff78925b79 github.com/aws/aws-sdk-go v1.20.20 github.com/coreos/go-oidc v2.1.0+incompatible - github.com/ghodss/yaml v1.0.0 // indirect github.com/go-ldap/ldap v3.0.3+incompatible - github.com/go-openapi/swag v0.19.4 // indirect - github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.3.2 // indirect github.com/google/go-github/v25 v25.1.3 github.com/google/gofuzz v1.0.0 - github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect 
github.com/json-iterator/go v1.1.7 github.com/moul/http2curl v1.0.0 - github.com/nmcclain/asn1-ber v0.0.0-20170104154839-2661553a0484 // indirect github.com/nmcclain/ldap v0.0.0-20191021200707-3b3b69a7e9e3 github.com/onsi/ginkgo v1.8.0 github.com/onsi/gomega v1.5.0 - github.com/pkg/errors v0.8.1 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v0.9.2 github.com/skratchdot/open-golang v0.0.0-20190402232053-79abb63cd66e github.com/spf13/afero v1.2.2 github.com/spf13/cobra v0.0.5 - github.com/spf13/pflag v1.0.3 - github.com/stretchr/testify v1.4.0 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.5.1 github.com/xanzy/go-gitlab v0.22.3 golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 // indirect gomodules.xyz/cert v1.0.2 google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 - gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect - gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect - gopkg.in/jcmturner/goidentity.v1 v1.0.0 // indirect gopkg.in/jcmturner/gokrb5.v4 v4.1.2 gopkg.in/square/go-jose.v2 v2.2.2 - k8s.io/api v0.0.0-20191114100352-16d7abae0d2a - k8s.io/apimachinery v0.16.5-beta.1 + k8s.io/api v0.18.2 + k8s.io/apimachinery v0.18.2 k8s.io/client-go v12.0.0+incompatible kmodules.xyz/client-go v0.0.0-20200125212626-a094b2ba24c6 ) @@ -86,4 +73,5 @@ replace ( sigs.k8s.io/kustomize => sigs.k8s.io/kustomize v2.0.3+incompatible sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.1.0 + ) diff --git a/go.sum b/go.sum index b7c887fea..1aa755a22 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,8 @@ cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= cloud.google.com/go v0.38.0/go.mod 
h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezqVhr2h6t9LJIEKVO0= +github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= @@ -23,6 +25,8 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= +github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc h1:Q5jwbRq/a1Tien+Is8Zn6HMBRgE13+UOQ9mTvrIuvuE= github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE= github.com/appscode/pat v0.0.0-20170521084856-48ff78925b79 h1:UpN2ZbEdh9ZuVGZwC9sm8VQoOfmu7bFucsp/wqQyZ3s= @@ -76,6 +80,7 @@ github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 h1:bRzFpEzvausOAt4va+I/22BZ1vXDtERngp0BNYDKej0= github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -118,11 +123,13 @@ github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+Z github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.4 h1:i/65mCM9s1h8eCkT07F5Z/C1e/f8VTgEwer+00yevpA= github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -132,8 +139,8 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -189,7 +196,6 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/jpillora/go-ogle-analytics v0.0.0-20161213085824-14b04e0594ef h1:jLpa0vamfyIGeIJ/CfUJEWoKriw4ODeOgF1XxDvgMZ4= github.com/jpillora/go-ogle-analytics v0.0.0-20161213085824-14b04e0594ef/go.mod h1:PlwhC7q1VSK73InDzdDatVetQrTsQHIbOvcJAZzitY0= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -212,7 +218,6 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson 
v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= @@ -255,6 +260,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -292,6 +299,8 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -301,6 +310,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xanzy/go-gitlab v0.22.3 h1:/rNlZ2hquUWNc6rJdntVM03tEOoTmnZ1lcNyJCl0WlU= @@ -361,7 +372,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -375,6 +385,7 @@ golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -412,17 +423,14 @@ google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 h1:ppLucX0K/60T3t6LPZ google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= @@ -430,7 +438,6 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -452,7 +459,6 @@ gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/installer/deployment.go b/installer/deployment.go index 
ccb85b91c..6b2333cf3 100644 --- a/installer/deployment.go +++ b/installer/deployment.go @@ -28,6 +28,7 @@ import ( "github.com/appscode/guard/auth/providers/google" "github.com/appscode/guard/auth/providers/ldap" "github.com/appscode/guard/auth/providers/token" + azureauthz "github.com/appscode/guard/authz/providers/azure" "github.com/appscode/guard/server" apps "k8s.io/api/apps/v1" @@ -37,11 +38,11 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -func newDeployment(opts Options) (objects []runtime.Object, err error) { +func newDeployment(authopts AuthOptions, authzopts AuthzOptions) (objects []runtime.Object, err error) { d := &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "guard", - Namespace: opts.Namespace, + Namespace: authopts.Namespace, Labels: labels, }, Spec: apps.DeploymentSpec{ @@ -61,7 +62,7 @@ func newDeployment(opts Options) (objects []runtime.Object, err error) { Containers: []core.Container{ { Name: "guard", - Image: fmt.Sprintf("%s/guard:%v", opts.PrivateRegistry, stringz.Val(v.Version.Version, "canary")), + Image: fmt.Sprintf("%s/guard:%v", authopts.PrivateRegistry, stringz.Val(v.Version.Version, "canary")), Args: []string{ "run", "--v=3", @@ -103,14 +104,14 @@ func newDeployment(opts Options) (objects []runtime.Object, err error) { }, }, } - if opts.imagePullSecret != "" { + if authopts.imagePullSecret != "" { d.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{ { - Name: opts.imagePullSecret, + Name: authopts.imagePullSecret, }, } } - if opts.RunOnMaster { + if authopts.RunOnMaster { d.Spec.Template.Spec.NodeSelector = map[string]string{ "node-role.kubernetes.io/master": "", } @@ -122,61 +123,77 @@ func newDeployment(opts Options) (objects []runtime.Object, err error) { } objects = append(objects, d) - servingOpts := server.NewSecureServingOptionsFromDir(opts.PkiDir) + servingOpts := server.NewSecureServingOptionsFromDir(authopts.PkiDir) if extras, err := servingOpts.Apply(d); err != nil { return nil, err } else { 
objects = append(objects, extras...) } - if extras, err := opts.AuthProvider.Apply(d); err != nil { + if extras, err := authopts.AuthProvider.Apply(d); err != nil { return nil, err } else { objects = append(objects, extras...) } - if opts.AuthProvider.Has(token.OrgType) { - if extras, err := opts.Token.Apply(d); err != nil { + if authopts.AuthProvider.Has(token.OrgType) { + if extras, err := authopts.Token.Apply(d); err != nil { return nil, err } else { objects = append(objects, extras...) } } - if opts.AuthProvider.Has(google.OrgType) { - if extras, err := opts.Google.Apply(d); err != nil { + if authopts.AuthProvider.Has(google.OrgType) { + if extras, err := authopts.Google.Apply(d); err != nil { return nil, err } else { objects = append(objects, extras...) } } - if opts.AuthProvider.Has(azure.OrgType) { - if extras, err := opts.Azure.Apply(d); err != nil { + if authopts.AuthProvider.Has(azure.OrgType) { + if extras, err := authopts.Azure.Apply(d); err != nil { return nil, err } else { objects = append(objects, extras...) } } - if opts.AuthProvider.Has(ldap.OrgType) { - if extras, err := opts.LDAP.Apply(d); err != nil { + if authopts.AuthProvider.Has(ldap.OrgType) { + if extras, err := authopts.LDAP.Apply(d); err != nil { return nil, err } else { objects = append(objects, extras...) } } - if opts.AuthProvider.Has(github.OrgType) { - if extras, err := opts.Github.Apply(d); err != nil { + if authopts.AuthProvider.Has(github.OrgType) { + if extras, err := authopts.Github.Apply(d); err != nil { return nil, err } else { objects = append(objects, extras...) } } - if opts.AuthProvider.Has(gitlab.OrgType) { - if extras, err := opts.Gitlab.Apply(d); err != nil { + if authopts.AuthProvider.Has(gitlab.OrgType) { + if extras, err := authopts.Gitlab.Apply(d); err != nil { + return nil, err + } else { + objects = append(objects, extras...) 
+ } + } + + if len(authzopts.AuthzProvider.Providers) > 0 { + if extras, err := authzopts.AuthzProvider.Apply(d); err != nil { + return nil, err + } else { + objects = append(objects, extras...) + } + } + + if authzopts.AuthzProvider.Has(azureauthz.OrgType) { + if extras, err := authzopts.Azure.Apply(d); err != nil { return nil, err } else { objects = append(objects, extras...) diff --git a/installer/installer.go b/installer/installer.go index db694d15c..1183fa8ce 100644 --- a/installer/installer.go +++ b/installer/installer.go @@ -30,21 +30,21 @@ var labels = map[string]string{ "app": "guard", } -func Generate(opts Options) ([]byte, error) { +func Generate(authopts AuthOptions, authzopts AuthzOptions) ([]byte, error) { var objects []runtime.Object - if opts.Namespace != metav1.NamespaceSystem && opts.Namespace != metav1.NamespaceDefault { - objects = append(objects, newNamespace(opts.Namespace)) + if authopts.Namespace != metav1.NamespaceSystem && authopts.Namespace != metav1.NamespaceDefault { + objects = append(objects, newNamespace(authopts.Namespace)) } - objects = append(objects, newServiceAccount(opts.Namespace)) - objects = append(objects, newClusterRole(opts.Namespace)) - objects = append(objects, newClusterRoleBinding(opts.Namespace)) - if deployObjects, err := newDeployment(opts); err != nil { + objects = append(objects, newServiceAccount(authopts.Namespace)) + objects = append(objects, newClusterRole(authopts.Namespace)) + objects = append(objects, newClusterRoleBinding(authopts.Namespace)) + if deployObjects, err := newDeployment(authopts, authzopts); err != nil { return nil, err } else { objects = append(objects, deployObjects...) 
} - if svc, err := newService(opts.Namespace, opts.Addr); err != nil { + if svc, err := newService(authopts.Namespace, authopts.Addr); err != nil { return nil, err } else { objects = append(objects, svc) diff --git a/installer/options.go b/installer/options.go index 877ed2ddb..e8c0e4295 100644 --- a/installer/options.go +++ b/installer/options.go @@ -25,12 +25,14 @@ import ( "github.com/appscode/guard/auth/providers/google" "github.com/appscode/guard/auth/providers/ldap" "github.com/appscode/guard/auth/providers/token" + authz "github.com/appscode/guard/authz/providers" + azureauthz "github.com/appscode/guard/authz/providers/azure" "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -type Options struct { +type AuthOptions struct { PkiDir string Namespace string Addr string @@ -47,8 +49,13 @@ type Options struct { Gitlab gitlab.Options } -func New() Options { - return Options{ +type AuthzOptions struct { + AuthzProvider authz.AuthzProviders + Azure azureauthz.Options +} + +func NewAuthOptions() AuthOptions { + return AuthOptions{ PkiDir: auth.DefaultDataDir, Namespace: metav1.NamespaceSystem, Addr: "10.96.10.96:443", @@ -63,7 +70,13 @@ func New() Options { } } -func (o *Options) AddFlags(fs *pflag.FlagSet) { +func NewAuthzOptions() AuthzOptions { + return AuthzOptions{ + Azure: azureauthz.NewOptions(), + } +} + +func (o *AuthOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&o.PkiDir, "pki-dir", o.PkiDir, "Path to directory where pki files are stored.") fs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "Name of Kubernetes namespace used to run guard server.") fs.StringVar(&o.Addr, "addr", o.Addr, "Address (host:port) of guard server.") @@ -79,7 +92,11 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { o.Gitlab.AddFlags(fs) } -func (o *Options) Validate() []error { +func (o *AuthzOptions) AddFlags(fs *pflag.FlagSet) { + o.AuthzProvider.AddFlags(fs) + o.Azure.AddFlags(fs) +} +func (o *AuthOptions) Validate() []error { var errs []error 
errs = append(errs, o.AuthProvider.Validate()...) @@ -104,3 +121,14 @@ func (o *Options) Validate() []error { return errs } + +func (o *AuthzOptions) Validate(opt *AuthOptions) []error { + var errs []error + errs = append(errs, o.AuthzProvider.Validate()...) + + if o.AuthzProvider.Has(azureauthz.OrgType) { + errs = append(errs, o.Azure.Validate(opt.Azure)...) + } + + return errs +} diff --git a/server/recommended.go b/server/authrecommended.go similarity index 90% rename from server/recommended.go rename to server/authrecommended.go index 28fc6931b..c0c449778 100644 --- a/server/recommended.go +++ b/server/authrecommended.go @@ -28,7 +28,7 @@ import ( "github.com/spf13/pflag" ) -type RecommendedOptions struct { +type AuthRecommendedOptions struct { SecureServing SecureServingOptions NTP NTPOptions Github github.Options @@ -40,8 +40,8 @@ type RecommendedOptions struct { AuthProvider providers.AuthProviders } -func NewRecommendedOptions() *RecommendedOptions { - return &RecommendedOptions{ +func NewAuthRecommendedOptions() *AuthRecommendedOptions { + return &AuthRecommendedOptions{ SecureServing: NewSecureServingOptions(), NTP: NewNTPOptions(), Github: github.NewOptions(), @@ -53,7 +53,7 @@ func NewRecommendedOptions() *RecommendedOptions { } } -func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) { +func (o *AuthRecommendedOptions) AddFlags(fs *pflag.FlagSet) { o.SecureServing.AddFlags(fs) o.NTP.AddFlags(fs) o.AuthProvider.AddFlags(fs) @@ -65,7 +65,7 @@ func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) { o.LDAP.AddFlags(fs) } -func (o *RecommendedOptions) Validate() []error { +func (o *AuthRecommendedOptions) Validate() []error { var errs []error errs = append(errs, o.SecureServing.Validate()...) errs = append(errs, o.NTP.Validate()...) diff --git a/server/authzhandler.go b/server/authzhandler.go new file mode 100644 index 000000000..981232222 --- /dev/null +++ b/server/authzhandler.go @@ -0,0 +1,80 @@ +/* +Copyright The Guard Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package server + +import ( + "net/http" + "strings" + + "github.com/appscode/guard/authz" + "github.com/appscode/guard/authz/providers/azure" + "github.com/golang/glog" + "github.com/pkg/errors" + authzv1 "k8s.io/api/authorization/v1" +) + +type Authzhandler struct { + AuthRecommendedOptions *AuthRecommendedOptions + AuthzRecommendedOptions *AuthzRecommendedOptions + Store authz.Store +} + +func (s *Authzhandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { + writeAuthzResponse(w, nil, nil, WithCode(errors.New("Missing client certificate"), http.StatusBadRequest)) + return + } + crt := req.TLS.PeerCertificates[0] + if len(crt.Subject.Organization) == 0 { + writeAuthzResponse(w, nil, nil, WithCode(errors.New("Client certificate is missing organization"), http.StatusBadRequest)) + return + } + org := crt.Subject.Organization[0] + glog.Infof("Received subject access review request for %s/%s", org, crt.Subject.CommonName) + + data := authzv1.SubjectAccessReview{} + err := json.NewDecoder(req.Body).Decode(&data) + if err != nil { + writeAuthzResponse(w, nil, nil, WithCode(errors.Wrap(err, "Failed to parse request"), http.StatusBadRequest)) + return + } + + binaryData, _ := json.MarshalIndent(&data, "", " ") + glog.V(10).Infof("Authz req:%s", binaryData) + + if !s.AuthzRecommendedOptions.AuthzProvider.Has(org) { + writeAuthzResponse(w, &data.Spec, nil, 
WithCode(errors.Errorf("guard does not provide service for %v", org), http.StatusBadRequest)) + return + } + + client, err := s.getAuthzProviderClient(org, crt.Subject.CommonName) + if err != nil { + writeAuthzResponse(w, &data.Spec, nil, err) + return + } + + resp, err := client.Check(&data.Spec) + writeAuthzResponse(w, &data.Spec, resp, err) +} + +func (s *Authzhandler) getAuthzProviderClient(org, commonName string) (authz.Interface, error) { + switch strings.ToLower(org) { + case azure.OrgType: + return azure.New(s.AuthzRecommendedOptions.Azure, s.AuthRecommendedOptions.Azure, s.Store) + } + + return nil, errors.Errorf("Client is using unknown organization %s", org) +} diff --git a/server/authzrecommended.go b/server/authzrecommended.go new file mode 100644 index 000000000..7297a5ea8 --- /dev/null +++ b/server/authzrecommended.go @@ -0,0 +1,52 @@ +/* +Copyright The Guard Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package server + +import ( + authz "github.com/appscode/guard/authz/providers" + "github.com/appscode/guard/authz/providers/azure" + + "github.com/spf13/pflag" +) + +type AuthzRecommendedOptions struct { + Azure azure.Options + AuthzProvider authz.AuthzProviders +} + +func NewAuthzRecommendedOptions() *AuthzRecommendedOptions { + return &AuthzRecommendedOptions{ + Azure: azure.NewOptions(), + } +} + +func (o *AuthzRecommendedOptions) AddFlags(fs *pflag.FlagSet) { + o.Azure.AddFlags(fs) + o.AuthzProvider.AddFlags(fs) +} + +func (o *AuthzRecommendedOptions) Validate(opts *AuthRecommendedOptions) []error { + var errs []error + if len(o.AuthzProvider.Providers) > 0 { + errs = append(errs, o.AuthzProvider.Validate()...) + } + + if o.AuthzProvider.Has(azure.OrgType) { + errs = append(errs, o.Azure.Validate(opts.Azure)...) + } + + return errs +} diff --git a/server/handler.go b/server/handler.go index 64cd05371..9b2d34dbb 100644 --- a/server/handler.go +++ b/server/handler.go @@ -27,13 +27,12 @@ import ( "github.com/appscode/guard/auth/providers/google" "github.com/appscode/guard/auth/providers/ldap" "github.com/appscode/guard/auth/providers/token" - "github.com/golang/glog" "github.com/pkg/errors" authv1 "k8s.io/api/authentication/v1" ) -func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { write(w, nil, WithCode(errors.New("Missing client certificate"), http.StatusBadRequest)) return @@ -53,12 +52,12 @@ func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } - if !s.RecommendedOptions.AuthProvider.Has(org) { + if !s.AuthRecommendedOptions.AuthProvider.Has(org) { write(w, nil, WithCode(errors.Errorf("guard does not provide service for %v", org), http.StatusBadRequest)) return } - if s.RecommendedOptions.AuthProvider.Has(token.OrgType) && s.TokenAuthenticator != nil { + if 
s.AuthRecommendedOptions.AuthProvider.Has(token.OrgType) && s.TokenAuthenticator != nil { resp, err := s.TokenAuthenticator.Check(data.Spec.Token) if err == nil { write(w, resp, err) @@ -76,18 +75,18 @@ func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { write(w, resp, err) } -func (s Server) getAuthProviderClient(org, commonName string) (auth.Interface, error) { +func (s *Server) getAuthProviderClient(org, commonName string) (auth.Interface, error) { switch strings.ToLower(org) { case github.OrgType: - return github.New(s.RecommendedOptions.Github, commonName), nil + return github.New(s.AuthRecommendedOptions.Github, commonName), nil case google.OrgType: - return google.New(s.RecommendedOptions.Google, commonName) + return google.New(s.AuthRecommendedOptions.Google, commonName) case gitlab.OrgType: - return gitlab.New(s.RecommendedOptions.Gitlab), nil + return gitlab.New(s.AuthRecommendedOptions.Gitlab), nil case azure.OrgType: - return azure.New(s.RecommendedOptions.Azure) + return azure.New(s.AuthRecommendedOptions.Azure) case ldap.OrgType: - return ldap.New(s.RecommendedOptions.LDAP), nil + return ldap.New(s.AuthRecommendedOptions.LDAP), nil } return nil, errors.Errorf("Client is using unknown organization %s", org) diff --git a/server/handler_test.go b/server/handler_test.go index f67e614a3..e2904a4cb 100644 --- a/server/handler_test.go +++ b/server/handler_test.go @@ -40,7 +40,7 @@ import ( func TestServeHTTP(t *testing.T) { srv := Server{ - RecommendedOptions: NewRecommendedOptions(), + AuthRecommendedOptions: NewAuthRecommendedOptions(), } store, err := certstore.NewCertStore(afero.NewMemMapFs(), "/pki", "foo") @@ -149,12 +149,12 @@ func TestGetAuthProviderClient(t *testing.T) { }, } s := Server{ - RecommendedOptions: NewRecommendedOptions(), + AuthRecommendedOptions: NewAuthRecommendedOptions(), } // https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-v2-protocols-oidc // 
https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#jwt-token-claims - s.RecommendedOptions.Azure.TenantID = "7fe81447-da57-4385-becb-6de57f21477e" + s.AuthRecommendedOptions.Azure.TenantID = "7fe81447-da57-4385-becb-6de57f21477e" for _, test := range testData { t.Run(test.testName, func(t *testing.T) { diff --git a/server/prometheus.go b/server/prometheus.go index 86da9b456..271474e17 100644 --- a/server/prometheus.go +++ b/server/prometheus.go @@ -65,9 +65,22 @@ var ( }, []string{"handler"}, ) + + inFlightGaugeAuthz = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "subjectaccessreviews_handler_requests_in_flight", + Help: "A gauge of requests currently being served by the subjectaccessreviews handler.", + }) + + counterAuthz = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "subjectaccessreviews_handler_requests_total", + Help: "A counter for requests to the subjectaccessreviews handler.", + }, + []string{"code", "method"}, + ) ) func init() { // Register all of the metrics in the standard registry. 
- prometheus.MustRegister(version, inFlightGauge, counter, duration, responseSize) + prometheus.MustRegister(version, inFlightGauge, counter, duration, responseSize, inFlightGaugeAuthz, counterAuthz) } diff --git a/server/server.go b/server/server.go index b31da9f89..1a8572858 100644 --- a/server/server.go +++ b/server/server.go @@ -29,6 +29,8 @@ import ( "github.com/appscode/go/signals" v "github.com/appscode/go/version" "github.com/appscode/guard/auth/providers/token" + "github.com/appscode/guard/authz/providers/azure" + "github.com/appscode/guard/authz/providers/azure/data" "github.com/appscode/pat" "github.com/golang/glog" @@ -40,32 +42,38 @@ import ( ) type Server struct { - RecommendedOptions *RecommendedOptions - TokenAuthenticator *token.Authenticator + AuthRecommendedOptions *AuthRecommendedOptions + AuthzRecommendedOptions *AuthzRecommendedOptions + TokenAuthenticator *token.Authenticator } func (s *Server) AddFlags(fs *pflag.FlagSet) { - s.RecommendedOptions.AddFlags(fs) + s.AuthRecommendedOptions.AddFlags(fs) + s.AuthzRecommendedOptions.AddFlags(fs) } func (s Server) ListenAndServe() { - if errs := s.RecommendedOptions.Validate(); errs != nil { + if errs := s.AuthRecommendedOptions.Validate(); errs != nil { glog.Fatal(errs) } - if s.RecommendedOptions.NTP.Enabled() { - ticker := time.NewTicker(s.RecommendedOptions.NTP.Interval) + if errs := s.AuthzRecommendedOptions.Validate(s.AuthRecommendedOptions); errs != nil { + glog.Fatal(errs) + } + + if s.AuthRecommendedOptions.NTP.Enabled() { + ticker := time.NewTicker(s.AuthRecommendedOptions.NTP.Interval) go func() { for range ticker.C { - if err := ntp.CheckSkewFromServer(s.RecommendedOptions.NTP.NTPServer, s.RecommendedOptions.NTP.MaxClodkSkew); err != nil { + if err := ntp.CheckSkewFromServer(s.AuthRecommendedOptions.NTP.NTPServer, s.AuthRecommendedOptions.NTP.MaxClodkSkew); err != nil { glog.Fatal(err) } } }() } - if s.RecommendedOptions.Token.AuthFile != "" { - s.TokenAuthenticator = 
token.New(s.RecommendedOptions.Token) + if s.AuthRecommendedOptions.Token.AuthFile != "" { + s.TokenAuthenticator = token.New(s.AuthRecommendedOptions.Token) err := s.TokenAuthenticator.Configure() if err != nil { @@ -73,7 +81,7 @@ func (s Server) ListenAndServe() { } if meta.PossiblyInCluster() { w := fsnotify.Watcher{ - WatchDir: filepath.Dir(s.RecommendedOptions.Token.AuthFile), + WatchDir: filepath.Dir(s.AuthRecommendedOptions.Token.AuthFile), Reload: func() error { return s.TokenAuthenticator.Configure() }, @@ -87,10 +95,10 @@ func (s Server) ListenAndServe() { } // loading file read related data - if err := s.RecommendedOptions.LDAP.Configure(); err != nil { + if err := s.AuthRecommendedOptions.LDAP.Configure(); err != nil { glog.Fatal(err) } - if err := s.RecommendedOptions.Google.Configure(); err != nil { + if err := s.AuthRecommendedOptions.Google.Configure(); err != nil { glog.Fatal(err) } @@ -101,7 +109,7 @@ func (s Server) ListenAndServe() { - http://www.bite-code.com/2015/06/25/tls-mutual-auth-in-golang/ - http://www.hydrogen18.com/blog/your-own-pki-tls-golang.html */ - caCert, err := ioutil.ReadFile(s.RecommendedOptions.SecureServing.CACertFile) + caCert, err := ioutil.ReadFile(s.AuthRecommendedOptions.SecureServing.CACertFile) if err != nil { glog.Fatal(err) } @@ -128,7 +136,6 @@ func (s Server) ListenAndServe() { ClientCAs: caCertPool, NextProtos: []string{"h2", "http/1.1"}, } - tlsConfig.BuildNameToCertificate() m := pat.New() @@ -139,10 +146,11 @@ func (s Server) ListenAndServe() { handler := promhttp.InstrumentHandlerInFlight(inFlightGauge, promhttp.InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "tokenreviews"}), promhttp.InstrumentHandlerCounter(counter, - promhttp.InstrumentHandlerResponseSize(responseSize.MustCurryWith(prometheus.Labels{"handler": "tokenreviews"}), s), + promhttp.InstrumentHandlerResponseSize(responseSize.MustCurryWith(prometheus.Labels{"handler": "tokenreviews"}), &s), ), ), ) + 
m.Post("/tokenreviews", handler) m.Get("/metrics", promhttp.Handler()) m.Get("/healthz", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { @@ -154,12 +162,37 @@ func (s Server) ListenAndServe() { glog.Fatal(err) } })) + + glog.Infoln("setting up authz providers") + if len(s.AuthzRecommendedOptions.AuthzProvider.Providers) > 0 { + authzhandler := Authzhandler{ + AuthRecommendedOptions: s.AuthRecommendedOptions, + AuthzRecommendedOptions: s.AuthzRecommendedOptions} + authzPromHandler := promhttp.InstrumentHandlerInFlight(inFlightGaugeAuthz, + promhttp.InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "subjectaccessreviews"}), + promhttp.InstrumentHandlerCounter(counterAuthz, + promhttp.InstrumentHandlerResponseSize(responseSize.MustCurryWith(prometheus.Labels{"handler": "subjectaccessreviews"}), &authzhandler), + ), + ), + ) + + m.Post("/subjectaccessreviews", authzPromHandler) + + if s.AuthzRecommendedOptions.AuthzProvider.Has(azure.OrgType) { + options := data.DefaultOptions + authzhandler.Store, err = data.NewDataStore(options) + if authzhandler.Store == nil || err != nil { + glog.Fatalf("Error in initializing cache. 
Error:%s", err.Error()) + } + } + } + srv := &http.Server{ - Addr: s.RecommendedOptions.SecureServing.SecureAddr, + Addr: s.AuthRecommendedOptions.SecureServing.SecureAddr, ReadTimeout: 5 * time.Second, WriteTimeout: 10 * time.Second, Handler: m, TLSConfig: tlsConfig, } - glog.Fatalln(srv.ListenAndServeTLS(s.RecommendedOptions.SecureServing.CertFile, s.RecommendedOptions.SecureServing.KeyFile)) + glog.Fatalln(srv.ListenAndServeTLS(s.AuthRecommendedOptions.SecureServing.CertFile, s.AuthRecommendedOptions.SecureServing.KeyFile)) } diff --git a/server/utils.go b/server/utils.go index f24cf5626..c905ddda6 100644 --- a/server/utils.go +++ b/server/utils.go @@ -25,6 +25,7 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" auth "k8s.io/api/authentication/v1" + authz "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -74,6 +75,48 @@ func write(w http.ResponseWriter, info *auth.UserInfo, err error) { } } +func writeAuthzResponse(w http.ResponseWriter, spec *authz.SubjectAccessReviewSpec, accessInfo *authz.SubjectAccessReviewStatus, err error) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("x-content-type-options", "nosniff") + code := http.StatusOK + + resp := authz.SubjectAccessReview{ + TypeMeta: metav1.TypeMeta{ + APIVersion: authz.SchemeGroupVersion.String(), + Kind: "SubjectAccessReview", + }, + } + + if spec != nil { + resp.Spec = *spec + } + + if accessInfo != nil { + resp.Status = *accessInfo + } else { + accessInfo := authz.SubjectAccessReviewStatus{Allowed: false, Denied: true} + if err != nil { + accessInfo.Reason = err.Error() + } + resp.Status = accessInfo + } + + if err != nil { + printStackTrace(err) + } + + w.WriteHeader(code) + if glog.V(10) { + data, _ := json.MarshalIndent(resp, "", " ") + glog.V(10).Infof("final data:%s", string(data)) + } + + err = json.NewEncoder(w).Encode(resp) + if err != nil { + panic(err) + } +} + type stackTracer interface { StackTrace() 
errors.StackTrace } diff --git a/test/e2e/installer_test.go b/test/e2e/installer_test.go index b63f8bd62..3869824c3 100644 --- a/test/e2e/installer_test.go +++ b/test/e2e/installer_test.go @@ -127,13 +127,13 @@ var _ = Describe("Installer test", func() { ) var ( - setupGuard = func(opts installer.Options) { + setupGuard = func(authopts installer.AuthOptions, authzopts installer.AuthzOptions) { By("Validate installer flag options") - errs := opts.Validate() + errs := authopts.Validate() Expect(errors.NewAggregate(errs)).NotTo(HaveOccurred()) By("Generating installer yaml") - data, err := installer.Generate(opts) + data, err := installer.Generate(authopts, authzopts) Expect(err).NotTo(HaveOccurred()) glog.Info(string(data)) @@ -292,11 +292,12 @@ var _ = Describe("Installer test", func() { Describe("Set up guard for individual auth provider", func() { var ( secretName string - opts installer.Options + authopts installer.AuthOptions + authzopts installer.AuthzOptions ) BeforeEach(func() { - opts = installer.Options{ + authopts = installer.AuthOptions{ PkiDir: certDir, RunOnMaster: false, Namespace: root.Namespace(), @@ -304,6 +305,8 @@ var _ = Describe("Installer test", func() { PrivateRegistry: privateRegistryName, } + authzopts = installer.AuthzOptions{} + secretName = pkiSecret checkServiceDeleted() @@ -325,11 +328,11 @@ var _ = Describe("Installer test", func() { Context("Setting up guard for github", func() { BeforeEach(func() { - opts.AuthProvider = providers.AuthProviders{Providers: []string{github.OrgType}} + authopts.AuthProvider = providers.AuthProviders{Providers: []string{github.OrgType}} }) It("Set up guard for github should be successful", func() { - setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() @@ -340,9 +343,9 @@ var _ = Describe("Installer test", func() { }) It("Set up guard for github should be successful, provided base url", func() { - opts.Github = githubOpts + authopts.Github = githubOpts - 
setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() @@ -355,11 +358,11 @@ var _ = Describe("Installer test", func() { Context("Setting up guard for gitlab", func() { BeforeEach(func() { - opts.AuthProvider = providers.AuthProviders{Providers: []string{gitlab.OrgType}} + authopts.AuthProvider = providers.AuthProviders{Providers: []string{gitlab.OrgType}} }) It("Set up guard for gitlab should be successful", func() { - setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() @@ -370,9 +373,9 @@ var _ = Describe("Installer test", func() { }) It("Set up guard for gitlab should be successful, provided base url", func() { - opts.Gitlab = gitlabOpts + authopts.Gitlab = gitlabOpts - setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() @@ -385,8 +388,8 @@ var _ = Describe("Installer test", func() { Context("Setting up guard for azure", func() { BeforeEach(func() { - opts.AuthProvider = providers.AuthProviders{Providers: []string{azure.OrgType}} - opts.Azure = azureOpts + authopts.AuthProvider = providers.AuthProviders{Providers: []string{azure.OrgType}} + authopts.Azure = azureOpts checkSecretDeleted(azureSecret) }) @@ -396,7 +399,7 @@ var _ = Describe("Installer test", func() { }) It("Set up guard for azure should be successful", func() { - setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() @@ -411,8 +414,8 @@ var _ = Describe("Installer test", func() { Context("Setting up guard for LDAP", func() { BeforeEach(func() { - opts.AuthProvider = providers.AuthProviders{Providers: []string{ldap.OrgType}} - opts.LDAP = ldapOpts + authopts.AuthProvider = providers.AuthProviders{Providers: []string{ldap.OrgType}} + authopts.LDAP = ldapOpts checkSecretDeleted(ldapSecret) }) @@ -422,7 +425,7 @@ var _ = Describe("Installer test", func() { }) It("Set up guard for LDAP should be successful", func() { - 
setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() @@ -437,8 +440,8 @@ var _ = Describe("Installer test", func() { Context("Setting up guard for token auth", func() { BeforeEach(func() { - opts.AuthProvider = providers.AuthProviders{Providers: []string{token.OrgType}} - opts.Token = tokenOpts + authopts.AuthProvider = providers.AuthProviders{Providers: []string{token.OrgType}} + authopts.Token = tokenOpts err := appFs.Mkdir(tokenAuthDir, 0777) Expect(err).NotTo(HaveOccurred()) @@ -455,7 +458,7 @@ var _ = Describe("Installer test", func() { }) It("Set up guard for token auth should be successful", func() { - setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() @@ -470,8 +473,8 @@ var _ = Describe("Installer test", func() { Context("Setting up guard for google", func() { BeforeEach(func() { - opts.AuthProvider = providers.AuthProviders{Providers: []string{google.OrgType}} - opts.Google = googleOpts + authopts.AuthProvider = providers.AuthProviders{Providers: []string{google.OrgType}} + authopts.Google = googleOpts err := appFs.Mkdir(saDir, 0777) Expect(err).NotTo(HaveOccurred()) @@ -489,7 +492,7 @@ var _ = Describe("Installer test", func() { }) It("Set up guard for google should be successful", func() { - setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() @@ -507,11 +510,12 @@ var _ = Describe("Installer test", func() { Describe("Setting up guard for all providers", func() { var ( secretNames []string - opts installer.Options + authopts installer.AuthOptions + authzopts installer.AuthzOptions ) BeforeEach(func() { - opts = installer.Options{ + authopts = installer.AuthOptions{ PkiDir: certDir, RunOnMaster: false, Namespace: root.Namespace(), @@ -523,7 +527,7 @@ var _ = Describe("Installer test", func() { Google: googleOpts, } - opts.AuthProvider = providers.AuthProviders{Providers: []string{ + authopts.AuthProvider = 
providers.AuthProviders{Providers: []string{ azure.OrgType, github.OrgType, gitlab.OrgType, @@ -574,7 +578,7 @@ var _ = Describe("Installer test", func() { }) It("Set up guard for all providers should be successful", func() { - setupGuard(opts) + setupGuard(authopts, authzopts) checkServiceCreated() checkClusterRoleCreated() diff --git a/vendor/github.com/allegro/bigcache/.gitignore b/vendor/github.com/allegro/bigcache/.gitignore new file mode 100644 index 000000000..256d66590 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/.gitignore @@ -0,0 +1,10 @@ +.idea +.DS_Store +/server/server.exe +/server/server +/server/server_dar* +/server/server_fre* +/server/server_win* +/server/server_net* +/server/server_ope* +CHANGELOG.md diff --git a/vendor/github.com/allegro/bigcache/.travis.yml b/vendor/github.com/allegro/bigcache/.travis.yml new file mode 100644 index 000000000..cc28df6a9 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/.travis.yml @@ -0,0 +1,31 @@ +language: go + +go: + - 1.x + - tip + +matrix: + allow_failures: + - go: tip + fast_finish: true + +before_install: + - go get github.com/modocache/gover + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/lint/golint + - go get github.com/stretchr/testify/assert + - go get github.com/gordonklaus/ineffassign + +script: + - gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false) + - diff <(echo -n) <(gofmt -s -d .) + - golint ./... # This won't break the build, just show warnings + - ineffassign . + - go vet ./... 
+ - go test -race -count=1 -coverprofile=queue.coverprofile ./queue + - go test -race -count=1 -coverprofile=server.coverprofile ./server + - go test -race -count=1 -coverprofile=main.coverprofile + - $HOME/gopath/bin/gover + - $HOME/gopath/bin/goveralls -coverprofile=gover.coverprofile -service travis-ci diff --git a/vendor/github.com/allegro/bigcache/LICENSE b/vendor/github.com/allegro/bigcache/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/allegro/bigcache/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/allegro/bigcache/README.md b/vendor/github.com/allegro/bigcache/README.md new file mode 100644 index 000000000..c23f7f36c --- /dev/null +++ b/vendor/github.com/allegro/bigcache/README.md @@ -0,0 +1,150 @@ +# BigCache [![Build Status](https://travis-ci.org/allegro/bigcache.svg?branch=master)](https://travis-ci.org/allegro/bigcache) [![Coverage Status](https://coveralls.io/repos/github/allegro/bigcache/badge.svg?branch=master)](https://coveralls.io/github/allegro/bigcache?branch=master) [![GoDoc](https://godoc.org/github.com/allegro/bigcache?status.svg)](https://godoc.org/github.com/allegro/bigcache) [![Go Report Card](https://goreportcard.com/badge/github.com/allegro/bigcache)](https://goreportcard.com/report/github.com/allegro/bigcache) + +Fast, concurrent, evicting in-memory cache written to keep big number of entries without impact on performance. +BigCache keeps entries on heap but omits GC for them. To achieve that operations on bytes arrays take place, +therefore entries (de)serialization in front of the cache will be needed in most use cases. 
+ +## Usage + +### Simple initialization + +```go +import "github.com/allegro/bigcache" + +cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute)) + +cache.Set("my-unique-key", []byte("value")) + +entry, _ := cache.Get("my-unique-key") +fmt.Println(string(entry)) +``` + +### Custom initialization + +When cache load can be predicted in advance then it is better to use custom initialization because additional memory +allocation can be avoided in that way. + +```go +import ( + "log" + + "github.com/allegro/bigcache" +) + +config := bigcache.Config { + // number of shards (must be a power of 2) + Shards: 1024, + // time after which entry can be evicted + LifeWindow: 10 * time.Minute, + // rps * lifeWindow, used only in initial memory allocation + MaxEntriesInWindow: 1000 * 10 * 60, + // max entry size in bytes, used only in initial memory allocation + MaxEntrySize: 500, + // prints information about additional memory allocation + Verbose: true, + // cache will not allocate more memory than this limit, value in MB + // if value is reached then the oldest entries can be overridden for the new ones + // 0 value means no size limit + HardMaxCacheSize: 8192, + // callback fired when the oldest entry is removed because of its expiration time or no space left + // for the new entry, or because delete was called. A bitmask representing the reason will be returned. + // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. + OnRemove: nil, + // OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left + // for the new entry, or because delete was called. A constant representing the reason will be passed through. + // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. + // Ignored if OnRemove is specified. 
+ OnRemoveWithReason: nil, + } + +cache, initErr := bigcache.NewBigCache(config) +if initErr != nil { + log.Fatal(initErr) +} + +cache.Set("my-unique-key", []byte("value")) + +if entry, err := cache.Get("my-unique-key"); err == nil { + fmt.Println(string(entry)) +} +``` + +## Benchmarks + +Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map. +Benchmark tests were made using an i7-6700K with 32GB of RAM on Windows 10. + +### Writes and reads + +```bash +cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m + +BenchmarkMapSet-8 3000000 569 ns/op 202 B/op 3 allocs/op +BenchmarkConcurrentMapSet-8 1000000 1592 ns/op 347 B/op 8 allocs/op +BenchmarkFreeCacheSet-8 3000000 775 ns/op 355 B/op 2 allocs/op +BenchmarkBigCacheSet-8 3000000 640 ns/op 303 B/op 2 allocs/op +BenchmarkMapGet-8 5000000 407 ns/op 24 B/op 1 allocs/op +BenchmarkConcurrentMapGet-8 3000000 558 ns/op 24 B/op 2 allocs/op +BenchmarkFreeCacheGet-8 2000000 682 ns/op 136 B/op 2 allocs/op +BenchmarkBigCacheGet-8 3000000 512 ns/op 152 B/op 4 allocs/op +BenchmarkBigCacheSetParallel-8 10000000 225 ns/op 313 B/op 3 allocs/op +BenchmarkFreeCacheSetParallel-8 10000000 218 ns/op 341 B/op 3 allocs/op +BenchmarkConcurrentMapSetParallel-8 5000000 318 ns/op 200 B/op 6 allocs/op +BenchmarkBigCacheGetParallel-8 20000000 178 ns/op 152 B/op 4 allocs/op +BenchmarkFreeCacheGetParallel-8 20000000 295 ns/op 136 B/op 3 allocs/op +BenchmarkConcurrentMapGetParallel-8 10000000 237 ns/op 24 B/op 2 allocs/op +``` + +Writes and reads in bigcache are faster than in freecache. +Writes to map are the slowest. + +### GC pause time + +```bash +cd caches_bench; go run caches_gc_overhead_comparison.go + +Number of entries: 20000000 +GC pause for bigcache: 5.8658ms +GC pause for freecache: 32.4341ms +GC pause for map: 52.9661ms +``` + +Test shows how long are the GC pauses for caches filled with 20mln of entries. +Bigcache and freecache have very similar GC pause time. 
+It is clear that both reduce GC overhead in contrast to map +which GC pause time took more than 10 seconds. + +## How it works + +BigCache relies on optimization presented in 1.5 version of Go ([issue-9477](https://github.com/golang/go/issues/9477)). +This optimization states that if map without pointers in keys and values is used then GC will omit its content. +Therefore BigCache uses `map[uint64]uint32` where keys are hashed and values are offsets of entries. + +Entries are kept in bytes array, to omit GC again. +Bytes array size can grow to gigabytes without impact on performance +because GC will only see single pointer to it. + +## Bigcache vs Freecache + +Both caches provide the same core features but they reduce GC overhead in different ways. +Bigcache relies on `map[uint64]uint32`, freecache implements its own mapping built on +slices to reduce number of pointers. + +Results from benchmark tests are presented above. +One of the advantage of bigcache over freecache is that you don’t need to know +the size of the cache in advance, because when bigcache is full, +it can allocate additional memory for new entries instead of +overwriting existing ones as freecache does currently. +However hard max size in bigcache also can be set, check [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config). + +## HTTP Server + +This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package. 
+ +## More + +Bigcache genesis is described in allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html) + +## License + +BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE)) diff --git a/vendor/github.com/allegro/bigcache/bigcache.go b/vendor/github.com/allegro/bigcache/bigcache.go new file mode 100644 index 000000000..b3879264a --- /dev/null +++ b/vendor/github.com/allegro/bigcache/bigcache.go @@ -0,0 +1,202 @@ +package bigcache + +import ( + "fmt" + "time" +) + +const ( + minimumEntriesInShard = 10 // Minimum number of entries in single shard +) + +// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance. +// It keeps entries on heap but omits GC for them. To achieve that, operations take place on byte arrays, +// therefore entries (de)serialization in front of the cache will be needed in most use cases. +type BigCache struct { + shards []*cacheShard + lifeWindow uint64 + clock clock + hash Hasher + config Config + shardMask uint64 + maxShardSize uint32 + close chan struct{} +} + +// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback. +type RemoveReason uint32 + +const ( + // Expired means the key is past its LifeWindow. + Expired RemoveReason = iota + // NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the + // entry exceeded the maximum shard size. + NoSpace + // Deleted means Delete was called and this key was removed as a result. 
+ Deleted +) + +// NewBigCache initialize new instance of BigCache +func NewBigCache(config Config) (*BigCache, error) { + return newBigCache(config, &systemClock{}) +} + +func newBigCache(config Config, clock clock) (*BigCache, error) { + + if !isPowerOfTwo(config.Shards) { + return nil, fmt.Errorf("Shards number must be power of two") + } + + if config.Hasher == nil { + config.Hasher = newDefaultHasher() + } + + cache := &BigCache{ + shards: make([]*cacheShard, config.Shards), + lifeWindow: uint64(config.LifeWindow.Seconds()), + clock: clock, + hash: config.Hasher, + config: config, + shardMask: uint64(config.Shards - 1), + maxShardSize: uint32(config.maximumShardSize()), + close: make(chan struct{}), + } + + var onRemove func(wrappedEntry []byte, reason RemoveReason) + if config.OnRemove != nil { + onRemove = cache.providedOnRemove + } else if config.OnRemoveWithReason != nil { + onRemove = cache.providedOnRemoveWithReason + } else { + onRemove = cache.notProvidedOnRemove + } + + for i := 0; i < config.Shards; i++ { + cache.shards[i] = initNewShard(config, onRemove, clock) + } + + if config.CleanWindow > 0 { + go func() { + ticker := time.NewTicker(config.CleanWindow) + defer ticker.Stop() + for { + select { + case t := <-ticker.C: + cache.cleanUp(uint64(t.Unix())) + case <-cache.close: + return + } + } + }() + } + + return cache, nil +} + +// Close is used to signal a shutdown of the cache when you are done with it. +// This allows the cleaning goroutines to exit and ensures references are not +// kept to the cache preventing GC of the entire cache. +func (c *BigCache) Close() error { + close(c.close) + return nil +} + +// Get reads entry for the key. +// It returns an ErrEntryNotFound when +// no entry exists for the given key. 
+func (c *BigCache) Get(key string) ([]byte, error) { + hashedKey := c.hash.Sum64(key) + shard := c.getShard(hashedKey) + return shard.get(key, hashedKey) +} + +// Set saves entry under the key +func (c *BigCache) Set(key string, entry []byte) error { + hashedKey := c.hash.Sum64(key) + shard := c.getShard(hashedKey) + return shard.set(key, hashedKey, entry) +} + +// Delete removes the key +func (c *BigCache) Delete(key string) error { + hashedKey := c.hash.Sum64(key) + shard := c.getShard(hashedKey) + return shard.del(key, hashedKey) +} + +// Reset empties all cache shards +func (c *BigCache) Reset() error { + for _, shard := range c.shards { + shard.reset(c.config) + } + return nil +} + +// Len computes number of entries in cache +func (c *BigCache) Len() int { + var len int + for _, shard := range c.shards { + len += shard.len() + } + return len +} + +// Capacity returns amount of bytes store in the cache. +func (c *BigCache) Capacity() int { + var len int + for _, shard := range c.shards { + len += shard.capacity() + } + return len +} + +// Stats returns cache's statistics +func (c *BigCache) Stats() Stats { + var s Stats + for _, shard := range c.shards { + tmp := shard.getStats() + s.Hits += tmp.Hits + s.Misses += tmp.Misses + s.DelHits += tmp.DelHits + s.DelMisses += tmp.DelMisses + s.Collisions += tmp.Collisions + } + return s +} + +// Iterator returns iterator function to iterate over EntryInfo's from whole cache. 
+func (c *BigCache) Iterator() *EntryInfoIterator { + return newIterator(c) +} + +func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool { + oldestTimestamp := readTimestampFromEntry(oldestEntry) + if currentTimestamp-oldestTimestamp > c.lifeWindow { + evict(Expired) + return true + } + return false +} + +func (c *BigCache) cleanUp(currentTimestamp uint64) { + for _, shard := range c.shards { + shard.cleanUp(currentTimestamp) + } +} + +func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) { + return c.shards[hashedKey&c.shardMask] +} + +func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) { + c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry)) +} + +func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) { + if c.config.onRemoveFilter == 0 || (1< 0 { + c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason) + } +} + +func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) { +} diff --git a/vendor/github.com/allegro/bigcache/bytes.go b/vendor/github.com/allegro/bigcache/bytes.go new file mode 100644 index 000000000..3944bfe13 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/bytes.go @@ -0,0 +1,14 @@ +// +build !appengine + +package bigcache + +import ( + "reflect" + "unsafe" +) + +func bytesToString(b []byte) string { + bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len} + return *(*string)(unsafe.Pointer(&strHeader)) +} diff --git a/vendor/github.com/allegro/bigcache/bytes_appengine.go b/vendor/github.com/allegro/bigcache/bytes_appengine.go new file mode 100644 index 000000000..3892f3b54 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/bytes_appengine.go @@ -0,0 +1,7 @@ +// +build appengine + +package bigcache + +func bytesToString(b []byte) string { + 
return string(b) +} diff --git a/vendor/github.com/allegro/bigcache/clock.go b/vendor/github.com/allegro/bigcache/clock.go new file mode 100644 index 000000000..f8b535e13 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/clock.go @@ -0,0 +1,14 @@ +package bigcache + +import "time" + +type clock interface { + epoch() int64 +} + +type systemClock struct { +} + +func (c systemClock) epoch() int64 { + return time.Now().Unix() +} diff --git a/vendor/github.com/allegro/bigcache/config.go b/vendor/github.com/allegro/bigcache/config.go new file mode 100644 index 000000000..9654143ab --- /dev/null +++ b/vendor/github.com/allegro/bigcache/config.go @@ -0,0 +1,86 @@ +package bigcache + +import "time" + +// Config for BigCache +type Config struct { + // Number of cache shards, value must be a power of two + Shards int + // Time after which entry can be evicted + LifeWindow time.Duration + // Interval between removing expired entries (clean up). + // If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution. + CleanWindow time.Duration + // Max number of entries in life window. Used only to calculate initial size for cache shards. + // When proper value is set then additional memory allocation does not occur. + MaxEntriesInWindow int + // Max size of entry in bytes. Used only to calculate initial size for cache shards. + MaxEntrySize int + // Verbose mode prints information about new memory allocation + Verbose bool + // Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used. + Hasher Hasher + // HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit. + // It can protect application from consuming all available memory on machine, therefore from running OOM Killer. + // Default value is 0 which means unlimited size. 
When the limit is higher than 0 and reached then + // the oldest entries are overridden for the new ones. + HardMaxCacheSize int + // OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left + // for the new entry, or because delete was called. + // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. + OnRemove func(key string, entry []byte) + // OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left + // for the new entry, or because delete was called. A constant representing the reason will be passed through. + // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. + // Ignored if OnRemove is specified. + OnRemoveWithReason func(key string, entry []byte, reason RemoveReason) + + onRemoveFilter int + + // Logger is a logging interface and used in combination with `Verbose` + // Defaults to `DefaultLogger()` + Logger Logger +} + +// DefaultConfig initializes config with default values. +// When load for BigCache can be predicted in advance then it is better to use custom config. +func DefaultConfig(eviction time.Duration) Config { + return Config{ + Shards: 1024, + LifeWindow: eviction, + CleanWindow: 0, + MaxEntriesInWindow: 1000 * 10 * 60, + MaxEntrySize: 500, + Verbose: true, + Hasher: newDefaultHasher(), + HardMaxCacheSize: 0, + Logger: DefaultLogger(), + } +} + +// initialShardSize computes initial shard size +func (c Config) initialShardSize() int { + return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard) +} + +// maximumShardSize computes maximum shard size +func (c Config) maximumShardSize() int { + maxShardSize := 0 + + if c.HardMaxCacheSize > 0 { + maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards + } + + return maxShardSize +} + +// OnRemoveFilterSet sets which remove reasons will trigger a call to OnRemoveWithReason. 
+// Filtering out reasons prevents bigcache from unwrapping them, which saves cpu. +func (c Config) OnRemoveFilterSet(reasons ...RemoveReason) Config { + c.onRemoveFilter = 0 + for i := range reasons { + c.onRemoveFilter |= 1 << uint(reasons[i]) + } + + return c +} diff --git a/vendor/github.com/allegro/bigcache/encoding.go b/vendor/github.com/allegro/bigcache/encoding.go new file mode 100644 index 000000000..4d434e5dc --- /dev/null +++ b/vendor/github.com/allegro/bigcache/encoding.go @@ -0,0 +1,62 @@ +package bigcache + +import ( + "encoding/binary" +) + +const ( + timestampSizeInBytes = 8 // Number of bytes used for timestamp + hashSizeInBytes = 8 // Number of bytes used for hash + keySizeInBytes = 2 // Number of bytes used for size of entry key + headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers +) + +func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte { + keyLength := len(key) + blobLength := len(entry) + headersSizeInBytes + keyLength + + if blobLength > len(*buffer) { + *buffer = make([]byte, blobLength) + } + blob := *buffer + + binary.LittleEndian.PutUint64(blob, timestamp) + binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash) + binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength)) + copy(blob[headersSizeInBytes:], key) + copy(blob[headersSizeInBytes+keyLength:], entry) + + return blob[:blobLength] +} + +func readEntry(data []byte) []byte { + length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) + + // copy on read + dst := make([]byte, len(data)-int(headersSizeInBytes+length)) + copy(dst, data[headersSizeInBytes+length:]) + + return dst +} + +func readTimestampFromEntry(data []byte) uint64 { + return binary.LittleEndian.Uint64(data) +} + +func readKeyFromEntry(data []byte) string { + length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) + + 
// copy on read + dst := make([]byte, length) + copy(dst, data[headersSizeInBytes:headersSizeInBytes+length]) + + return bytesToString(dst) +} + +func readHashFromEntry(data []byte) uint64 { + return binary.LittleEndian.Uint64(data[timestampSizeInBytes:]) +} + +func resetKeyFromEntry(data []byte) { + binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0) +} diff --git a/vendor/github.com/allegro/bigcache/entry_not_found_error.go b/vendor/github.com/allegro/bigcache/entry_not_found_error.go new file mode 100644 index 000000000..051a71230 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/entry_not_found_error.go @@ -0,0 +1,6 @@ +package bigcache + +import "errors" + +// ErrEntryNotFound is an error type struct which is returned when entry was not found for provided key +var ErrEntryNotFound = errors.New("Entry not found") diff --git a/vendor/github.com/allegro/bigcache/fnv.go b/vendor/github.com/allegro/bigcache/fnv.go new file mode 100644 index 000000000..188c9aa6d --- /dev/null +++ b/vendor/github.com/allegro/bigcache/fnv.go @@ -0,0 +1,28 @@ +package bigcache + +// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations. +// Its Sum64 method will lay the value out in big-endian byte order. +// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function +func newDefaultHasher() Hasher { + return fnv64a{} +} + +type fnv64a struct{} + +const ( + // offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash + offset64 = 14695981039346656037 + // prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash + prime64 = 1099511628211 +) + +// Sum64 gets the string and returns its uint64 hash value. 
+func (f fnv64a) Sum64(key string) uint64 { + var hash uint64 = offset64 + for i := 0; i < len(key); i++ { + hash ^= uint64(key[i]) + hash *= prime64 + } + + return hash +} diff --git a/vendor/github.com/allegro/bigcache/hash.go b/vendor/github.com/allegro/bigcache/hash.go new file mode 100644 index 000000000..5f8ade774 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/hash.go @@ -0,0 +1,8 @@ +package bigcache + +// Hasher is responsible for generating unsigned, 64 bit hash of provided string. Hasher should minimize collisions +// (generating same hash for different strings) and while performance is also important fast functions are preferable (i.e. +// you can use FarmHash family). +type Hasher interface { + Sum64(string) uint64 +} diff --git a/vendor/github.com/allegro/bigcache/iterator.go b/vendor/github.com/allegro/bigcache/iterator.go new file mode 100644 index 000000000..70b98d900 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/iterator.go @@ -0,0 +1,122 @@ +package bigcache + +import "sync" + +type iteratorError string + +func (e iteratorError) Error() string { + return string(e) +} + +// ErrInvalidIteratorState is reported when iterator is in invalid state +const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. 
Use SetNext() to move to next position") + +// ErrCannotRetrieveEntry is reported when entry cannot be retrieved from underlying +const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache") + +var emptyEntryInfo = EntryInfo{} + +// EntryInfo holds informations about entry in the cache +type EntryInfo struct { + timestamp uint64 + hash uint64 + key string + value []byte +} + +// Key returns entry's underlying key +func (e EntryInfo) Key() string { + return e.key +} + +// Hash returns entry's hash value +func (e EntryInfo) Hash() uint64 { + return e.hash +} + +// Timestamp returns entry's timestamp (time of insertion) +func (e EntryInfo) Timestamp() uint64 { + return e.timestamp +} + +// Value returns entry's underlying value +func (e EntryInfo) Value() []byte { + return e.value +} + +// EntryInfoIterator allows to iterate over entries in the cache +type EntryInfoIterator struct { + mutex sync.Mutex + cache *BigCache + currentShard int + currentIndex int + elements []uint32 + elementsCount int + valid bool +} + +// SetNext moves to next element and returns true if it exists. 
+func (it *EntryInfoIterator) SetNext() bool { + it.mutex.Lock() + + it.valid = false + it.currentIndex++ + + if it.elementsCount > it.currentIndex { + it.valid = true + it.mutex.Unlock() + return true + } + + for i := it.currentShard + 1; i < it.cache.config.Shards; i++ { + it.elements, it.elementsCount = it.cache.shards[i].copyKeys() + + // Non empty shard - stick with it + if it.elementsCount > 0 { + it.currentIndex = 0 + it.currentShard = i + it.valid = true + it.mutex.Unlock() + return true + } + } + it.mutex.Unlock() + return false +} + +func newIterator(cache *BigCache) *EntryInfoIterator { + elements, count := cache.shards[0].copyKeys() + + return &EntryInfoIterator{ + cache: cache, + currentShard: 0, + currentIndex: -1, + elements: elements, + elementsCount: count, + } +} + +// Value returns current value from the iterator +func (it *EntryInfoIterator) Value() (EntryInfo, error) { + it.mutex.Lock() + + if !it.valid { + it.mutex.Unlock() + return emptyEntryInfo, ErrInvalidIteratorState + } + + entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex])) + + if err != nil { + it.mutex.Unlock() + return emptyEntryInfo, ErrCannotRetrieveEntry + } + it.mutex.Unlock() + + return EntryInfo{ + timestamp: readTimestampFromEntry(entry), + hash: readHashFromEntry(entry), + key: readKeyFromEntry(entry), + value: readEntry(entry), + }, nil +} diff --git a/vendor/github.com/allegro/bigcache/logger.go b/vendor/github.com/allegro/bigcache/logger.go new file mode 100644 index 000000000..50e84abc8 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/logger.go @@ -0,0 +1,30 @@ +package bigcache + +import ( + "log" + "os" +) + +// Logger is invoked when `Config.Verbose=true` +type Logger interface { + Printf(format string, v ...interface{}) +} + +// this is a safeguard, breaking on compile time in case +// `log.Logger` does not adhere to our `Logger` interface. 
+// see https://golang.org/doc/faq#guarantee_satisfies_interface +var _ Logger = &log.Logger{} + +// DefaultLogger returns a `Logger` implementation +// backed by stdlib's log +func DefaultLogger() *log.Logger { + return log.New(os.Stdout, "", log.LstdFlags) +} + +func newLogger(custom Logger) Logger { + if custom != nil { + return custom + } + + return DefaultLogger() +} diff --git a/vendor/github.com/allegro/bigcache/queue/bytes_queue.go b/vendor/github.com/allegro/bigcache/queue/bytes_queue.go new file mode 100644 index 000000000..bda737403 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/queue/bytes_queue.go @@ -0,0 +1,238 @@ +package queue + +import ( + "encoding/binary" + "log" + "time" +) + +const ( + // Number of bytes used to keep information about entry size + headerEntrySize = 4 + // Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index + leftMarginIndex = 1 + // Minimum empty blob size in bytes. Empty blob fills space between tail and head in additional memory allocation. + // It keeps entries indexes unchanged + minimumEmptyBlobSize = 32 + headerEntrySize +) + +var ( + errEmptyQueue = &queueError{"Empty queue"} + errInvalidIndex = &queueError{"Index must be greater than zero. Invalid index."} + errIndexOutOfBounds = &queueError{"Index out of range"} +) + +// BytesQueue is a non-thread safe queue type of fifo based on bytes array. +// For every push operation index of entry is returned. It can be used to read the entry later +type BytesQueue struct { + array []byte + capacity int + maxCapacity int + head int + tail int + count int + rightMargin int + headerBuffer []byte + verbose bool + initialCapacity int +} + +type queueError struct { + message string +} + +// NewBytesQueue initialize new bytes queue. 
+// Initial capacity is used in bytes array allocation +// When verbose flag is set then information about memory allocation are printed +func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue { + return &BytesQueue{ + array: make([]byte, initialCapacity), + capacity: initialCapacity, + maxCapacity: maxCapacity, + headerBuffer: make([]byte, headerEntrySize), + tail: leftMarginIndex, + head: leftMarginIndex, + rightMargin: leftMarginIndex, + verbose: verbose, + initialCapacity: initialCapacity, + } +} + +// Reset removes all entries from queue +func (q *BytesQueue) Reset() { + // Just reset indexes + q.tail = leftMarginIndex + q.head = leftMarginIndex + q.rightMargin = leftMarginIndex + q.count = 0 +} + +// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed. +// Returns index for pushed data or error if maximum size queue limit is reached. +func (q *BytesQueue) Push(data []byte) (int, error) { + dataLen := len(data) + + if q.availableSpaceAfterTail() < dataLen+headerEntrySize { + if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize { + q.tail = leftMarginIndex + } else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 { + return -1, &queueError{"Full queue. 
Maximum size limit reached."} + } else { + q.allocateAdditionalMemory(dataLen + headerEntrySize) + } + } + + index := q.tail + + q.push(data, dataLen) + + return index, nil +} + +func (q *BytesQueue) allocateAdditionalMemory(minimum int) { + start := time.Now() + if q.capacity < minimum { + q.capacity += minimum + } + q.capacity = q.capacity * 2 + if q.capacity > q.maxCapacity && q.maxCapacity > 0 { + q.capacity = q.maxCapacity + } + + oldArray := q.array + q.array = make([]byte, q.capacity) + + if leftMarginIndex != q.rightMargin { + copy(q.array, oldArray[:q.rightMargin]) + + if q.tail < q.head { + emptyBlobLen := q.head - q.tail - headerEntrySize + q.push(make([]byte, emptyBlobLen), emptyBlobLen) + q.head = leftMarginIndex + q.tail = q.rightMargin + } + } + + if q.verbose { + log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity) + } +} + +func (q *BytesQueue) push(data []byte, len int) { + binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len)) + q.copy(q.headerBuffer, headerEntrySize) + + q.copy(data, len) + + if q.tail > q.head { + q.rightMargin = q.tail + } + + q.count++ +} + +func (q *BytesQueue) copy(data []byte, len int) { + q.tail += copy(q.array[q.tail:], data[:len]) +} + +// Pop reads the oldest entry from queue and moves head pointer to the next one +func (q *BytesQueue) Pop() ([]byte, error) { + data, size, err := q.peek(q.head) + if err != nil { + return nil, err + } + + q.head += headerEntrySize + size + q.count-- + + if q.head == q.rightMargin { + q.head = leftMarginIndex + if q.tail == q.rightMargin { + q.tail = leftMarginIndex + } + q.rightMargin = q.tail + } + + return data, nil +} + +// Peek reads the oldest entry from list without moving head pointer +func (q *BytesQueue) Peek() ([]byte, error) { + data, _, err := q.peek(q.head) + return data, err +} + +// Get reads entry from index +func (q *BytesQueue) Get(index int) ([]byte, error) { + data, _, err := q.peek(index) + return data, err +} + +// CheckGet 
checks if an entry can be read from index +func (q *BytesQueue) CheckGet(index int) error { + return q.peekCheckErr(index) +} + +// Capacity returns number of allocated bytes for queue +func (q *BytesQueue) Capacity() int { + return q.capacity +} + +// Len returns number of entries kept in queue +func (q *BytesQueue) Len() int { + return q.count +} + +// Error returns error message +func (e *queueError) Error() string { + return e.message +} + +// peekCheckErr is identical to peek, but does not actually return any data +func (q *BytesQueue) peekCheckErr(index int) error { + + if q.count == 0 { + return errEmptyQueue + } + + if index <= 0 { + return errInvalidIndex + } + + if index+headerEntrySize >= len(q.array) { + return errIndexOutOfBounds + } + return nil +} + +func (q *BytesQueue) peek(index int) ([]byte, int, error) { + + if q.count == 0 { + return nil, 0, errEmptyQueue + } + + if index <= 0 { + return nil, 0, errInvalidIndex + } + + if index+headerEntrySize >= len(q.array) { + return nil, 0, errIndexOutOfBounds + } + + blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize])) + return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil +} + +func (q *BytesQueue) availableSpaceAfterTail() int { + if q.tail >= q.head { + return q.capacity - q.tail + } + return q.head - q.tail - minimumEmptyBlobSize +} + +func (q *BytesQueue) availableSpaceBeforeHead() int { + if q.tail >= q.head { + return q.head - leftMarginIndex - minimumEmptyBlobSize + } + return q.head - q.tail - minimumEmptyBlobSize +} diff --git a/vendor/github.com/allegro/bigcache/shard.go b/vendor/github.com/allegro/bigcache/shard.go new file mode 100644 index 000000000..a31094ff3 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/shard.go @@ -0,0 +1,259 @@ +package bigcache + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/allegro/bigcache/queue" +) + +type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason) + +type 
cacheShard struct { + hashmap map[uint64]uint32 + entries queue.BytesQueue + lock sync.RWMutex + entryBuffer []byte + onRemove onRemoveCallback + + isVerbose bool + logger Logger + clock clock + lifeWindow uint64 + + stats Stats +} + +func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) { + s.lock.RLock() + itemIndex := s.hashmap[hashedKey] + + if itemIndex == 0 { + s.lock.RUnlock() + s.miss() + return nil, ErrEntryNotFound + } + + wrappedEntry, err := s.entries.Get(int(itemIndex)) + if err != nil { + s.lock.RUnlock() + s.miss() + return nil, err + } + if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey { + if s.isVerbose { + s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey) + } + s.lock.RUnlock() + s.collision() + return nil, ErrEntryNotFound + } + entry := readEntry(wrappedEntry) + s.lock.RUnlock() + s.hit() + return entry, nil +} + +func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error { + currentTimestamp := uint64(s.clock.epoch()) + + s.lock.Lock() + + if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 { + if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil { + resetKeyFromEntry(previousEntry) + } + } + + if oldestEntry, err := s.entries.Peek(); err == nil { + s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry) + } + + w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer) + + for { + if index, err := s.entries.Push(w); err == nil { + s.hashmap[hashedKey] = uint32(index) + s.lock.Unlock() + return nil + } + if s.removeOldestEntry(NoSpace) != nil { + s.lock.Unlock() + return fmt.Errorf("entry is bigger than max shard size") + } + } +} + +func (s *cacheShard) del(key string, hashedKey uint64) error { + // Optimistic pre-check using only readlock + s.lock.RLock() + itemIndex := s.hashmap[hashedKey] + + if itemIndex == 0 { + s.lock.RUnlock() + s.delmiss() + return ErrEntryNotFound + } + + if err := 
s.entries.CheckGet(int(itemIndex)); err != nil { + s.lock.RUnlock() + s.delmiss() + return err + } + s.lock.RUnlock() + + s.lock.Lock() + { + // After obtaining the writelock, we need to read the same again, + // since the data delivered earlier may be stale now + itemIndex = s.hashmap[hashedKey] + + if itemIndex == 0 { + s.lock.Unlock() + s.delmiss() + return ErrEntryNotFound + } + + wrappedEntry, err := s.entries.Get(int(itemIndex)) + if err != nil { + s.lock.Unlock() + s.delmiss() + return err + } + + delete(s.hashmap, hashedKey) + s.onRemove(wrappedEntry, Deleted) + resetKeyFromEntry(wrappedEntry) + } + s.lock.Unlock() + + s.delhit() + return nil +} + +func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool { + oldestTimestamp := readTimestampFromEntry(oldestEntry) + if currentTimestamp-oldestTimestamp > s.lifeWindow { + evict(Expired) + return true + } + return false +} + +func (s *cacheShard) cleanUp(currentTimestamp uint64) { + s.lock.Lock() + for { + if oldestEntry, err := s.entries.Peek(); err != nil { + break + } else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted { + break + } + } + s.lock.Unlock() +} + +func (s *cacheShard) getOldestEntry() ([]byte, error) { + s.lock.RLock() + defer s.lock.RUnlock() + return s.entries.Peek() +} + +func (s *cacheShard) getEntry(index int) ([]byte, error) { + s.lock.RLock() + entry, err := s.entries.Get(index) + s.lock.RUnlock() + + return entry, err +} + +func (s *cacheShard) copyKeys() (keys []uint32, next int) { + s.lock.RLock() + keys = make([]uint32, len(s.hashmap)) + + for _, index := range s.hashmap { + keys[next] = index + next++ + } + + s.lock.RUnlock() + return keys, next +} + +func (s *cacheShard) removeOldestEntry(reason RemoveReason) error { + oldest, err := s.entries.Pop() + if err == nil { + hash := readHashFromEntry(oldest) + delete(s.hashmap, hash) + s.onRemove(oldest, reason) + return nil + } + return err 
+} + +func (s *cacheShard) reset(config Config) { + s.lock.Lock() + s.hashmap = make(map[uint64]uint32, config.initialShardSize()) + s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes) + s.entries.Reset() + s.lock.Unlock() +} + +func (s *cacheShard) len() int { + s.lock.RLock() + res := len(s.hashmap) + s.lock.RUnlock() + return res +} + +func (s *cacheShard) capacity() int { + s.lock.RLock() + res := s.entries.Capacity() + s.lock.RUnlock() + return res +} + +func (s *cacheShard) getStats() Stats { + var stats = Stats{ + Hits: atomic.LoadInt64(&s.stats.Hits), + Misses: atomic.LoadInt64(&s.stats.Misses), + DelHits: atomic.LoadInt64(&s.stats.DelHits), + DelMisses: atomic.LoadInt64(&s.stats.DelMisses), + Collisions: atomic.LoadInt64(&s.stats.Collisions), + } + return stats +} + +func (s *cacheShard) hit() { + atomic.AddInt64(&s.stats.Hits, 1) +} + +func (s *cacheShard) miss() { + atomic.AddInt64(&s.stats.Misses, 1) +} + +func (s *cacheShard) delhit() { + atomic.AddInt64(&s.stats.DelHits, 1) +} + +func (s *cacheShard) delmiss() { + atomic.AddInt64(&s.stats.DelMisses, 1) +} + +func (s *cacheShard) collision() { + atomic.AddInt64(&s.stats.Collisions, 1) +} + +func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard { + return &cacheShard{ + hashmap: make(map[uint64]uint32, config.initialShardSize()), + entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose), + entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes), + onRemove: callback, + + isVerbose: config.Verbose, + logger: newLogger(config.Logger), + clock: clock, + lifeWindow: uint64(config.LifeWindow.Seconds()), + } +} diff --git a/vendor/github.com/allegro/bigcache/stats.go b/vendor/github.com/allegro/bigcache/stats.go new file mode 100644 index 000000000..07157132a --- /dev/null +++ b/vendor/github.com/allegro/bigcache/stats.go @@ -0,0 +1,15 @@ +package bigcache + +// Stats stores cache 
statistics +type Stats struct { + // Hits is a number of successfully found keys + Hits int64 `json:"hits"` + // Misses is a number of not found keys + Misses int64 `json:"misses"` + // DelHits is a number of successfully deleted keys + DelHits int64 `json:"delete_hits"` + // DelMisses is a number of not deleted keys + DelMisses int64 `json:"delete_misses"` + // Collisions is a number of happened key-collisions + Collisions int64 `json:"collisions"` +} diff --git a/vendor/github.com/allegro/bigcache/utils.go b/vendor/github.com/allegro/bigcache/utils.go new file mode 100644 index 000000000..ca1df79b9 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/utils.go @@ -0,0 +1,16 @@ +package bigcache + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func convertMBToBytes(value int) int { + return value * 1024 * 1024 +} + +func isPowerOfTwo(number int) bool { + return (number & (number - 1)) == 0 +} diff --git a/vendor/github.com/ghodss/yaml/yaml_go110.go b/vendor/github.com/ghodss/yaml/yaml_go110.go new file mode 100644 index 000000000..ab3e06a22 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. + +// +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. 
+func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 000000000..ce9d7cded --- /dev/null +++ b/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... + +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 000000000..be0d10d0c --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. 
+func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. 
+func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go new file mode 100644 index 000000000..caa352741 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float32Slice Value +type float32SliceValue struct { + value *[]float32 + changed bool +} + +func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { + isv := new(float32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return err + } + out[i] = float32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float32SliceValue) Type() string { + return "float32Slice" +} + +func (s *float32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float32SliceValue) fromString(val string) (float32, error) { + t64, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(t64), nil +} + +func (s *float32SliceValue) toString(val float32) string { + return fmt.Sprintf("%f", val) +} + +func (s *float32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float32SliceValue) Replace(val []string) error { + out := make([]float32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float32{}, nil + } + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return nil, err + } + out[i] = float32(temp64) + + } + return out, nil +} + +// GetFloat32Slice return the []float32 value of a flag with the given name +func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { + val, err := f.getFlagType(name, "float32Slice", float32SliceConv) + if err != nil { + return []float32{}, err + } + return val.([]float32), nil +} + +// Float32SliceVar defines a float32Slice flag with 
specified name, default value, and usage string. +// The argument p points to a []float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. +// The argument p points to a float32[] variable in which to store the value of the flag. +func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. 
+// The return value is the address of a []float32 variable that stores the value of the flag. +func Float32Slice(name string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, "", value, usage) +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go new file mode 100644 index 000000000..85bf3073d --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float64Slice Value +type float64SliceValue struct { + value *[]float64 + changed bool +} + +func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { + isv := new(float64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float64SliceValue) Type() string { + return "float64Slice" +} + +func (s *float64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float64SliceValue) fromString(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +func (s *float64SliceValue) toString(val float64) string { + return fmt.Sprintf("%f", val) +} + +func (s *float64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float64SliceValue) Replace(val []string) error { + out := make([]float64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float64{}, nil + } + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetFloat64Slice return the []float64 value of a flag with the given name +func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { + val, err := f.getFlagType(name, "float64Slice", float64SliceConv) + if err != nil { + return []float64{}, err + } + return val.([]float64), nil +} + +// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. 
+// The argument p points to a []float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. +// The argument p points to a float64[] variable in which to store the value of the flag. +func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. +func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. 
+// The return value is the address of a []float64 variable that stores the value of the flag. +func Float64Slice(name string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, "", value, usage) +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod new file mode 100644 index 000000000..b2287eec1 --- /dev/null +++ b/vendor/github.com/spf13/pflag/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/pflag + +go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go new file mode 100644 index 000000000..ff128ff06 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int32Slice Value +type int32SliceValue struct { + value *[]int32 + changed bool +} + +func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { + isv := new(int32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return err + } + out[i] = int32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int32SliceValue) Type() string { + return "int32Slice" +} + +func (s *int32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int32SliceValue) fromString(val string) (int32, error) { + t64, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(t64), nil +} + +func (s *int32SliceValue) toString(val int32) string { + return fmt.Sprintf("%d", val) +} + +func (s *int32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int32SliceValue) Replace(val []string) error { + out := make([]int32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int32{}, nil + } + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return nil, err + } + out[i] = int32(temp64) + + } + return out, nil +} + +// GetInt32Slice return the []int32 value of a flag with the given name +func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { + val, err := f.getFlagType(name, "int32Slice", int32SliceConv) + if err != nil { + return []int32{}, err + } + return val.([]int32), nil +} + +// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. 
+// The argument p points to a []int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. +// The argument p points to a int32[] variable in which to store the value of the flag. +func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. 
+func Int32Slice(name string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, "", value, usage) +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go new file mode 100644 index 000000000..25464638f --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int64Slice Value +type int64SliceValue struct { + value *[]int64 + changed bool +} + +func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { + isv := new(int64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int64SliceValue) Type() string { + return "int64Slice" +} + +func (s *int64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int64SliceValue) fromString(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +func (s *int64SliceValue) toString(val int64) string { + return fmt.Sprintf("%d", val) +} + +func (s *int64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int64SliceValue) Replace(val []string) error { + out := make([]int64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int64{}, nil + } + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetInt64Slice return the []int64 value of a flag with the given name +func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { + val, err := f.getFlagType(name, "int64Slice", int64SliceConv) + if err != nil { + return []int64{}, err + } + return val.([]int64), nil +} + +// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. +// The argument p points to a []int64 variable in which to store the value of the flag. 
+func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. +// The argument p points to a int64[] variable in which to store the value of the flag. +func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. 
+func Int64Slice(name string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, "", value, usage) +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go new file mode 100644 index 000000000..a807a04a0 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_int64.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt64 Value +type stringToInt64Value struct { + value *map[string]int64 + changed bool +} + +func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { + ssv := new(stringToInt64Value) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToInt64Value) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToInt64Value) Type() string { + return "stringToInt64" +} + +func (s *stringToInt64Value) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.FormatInt(v, 10)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToInt64Conv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An 
empty string would cause an empty map + if len(val) == 0 { + return map[string]int64{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt64 return the map[string]int64 value of a flag with the given name +func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { + val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) + if err != nil { + return map[string]int64{}, err + } + return val.(map[string]int64), nil +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. 
+// The value of each argument will not try to be separated by comma +func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, "", value, usage) + return &p +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, "", value, usage) +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, shorthand, value, usage) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 42545057a..722b30ffb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -229,7 +229,7 @@ github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types # github.com/peterbourgon/diskv v2.0.1+incompatible github.com/peterbourgon/diskv -# github.com/pkg/errors v0.8.1 +# github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib @@ -263,9 +263,9 @@ github.com/spf13/afero/mem # github.com/spf13/cobra v0.0.5 github.com/spf13/cobra github.com/spf13/cobra/doc -# github.com/spf13/pflag v1.0.3 +# github.com/spf13/pflag v1.0.5 github.com/spf13/pflag -# github.com/stretchr/testify v1.4.0 +# github.com/stretchr/testify v1.5.1 github.com/stretchr/testify/assert # github.com/xanzy/go-gitlab v0.22.3 github.com/xanzy/go-gitlab @@ -464,7 +464,7 @@ gopkg.in/square/go-jose.v2/json gopkg.in/tomb.v1 # gopkg.in/yaml.v2 v2.2.4 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0-20191114100352-16d7abae0d2a => k8s.io/api v0.0.0-20191114100352-16d7abae0d2a +# k8s.io/api v0.18.2 => k8s.io/api v0.0.0-20191114100352-16d7abae0d2a k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 @@ -503,7 +503,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.16.5-beta.1 => github.com/kmodules/apimachinery v0.0.0-20191119091232-0553326db082 +# k8s.io/apimachinery v0.18.2 => github.com/kmodules/apimachinery v0.0.0-20191119091232-0553326db082 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta