From 88e4f5526dc33c5ff57fe6d8e1256c7c95650b49 Mon Sep 17 00:00:00 2001 From: Viktor Kramarenko Date: Tue, 24 Sep 2024 17:33:27 +0300 Subject: [PATCH 1/4] first draft Signed-off-by: Viktor Kramarenko --- .../src/pkg/cache/cache.go | 60 ++++++++++++++++--- .../src/pkg/controller/lvg_watcher_cache.go | 9 +-- 2 files changed, 54 insertions(+), 15 deletions(-) diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go index 16954fdb..60f169dc 100644 --- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go +++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "sync" + "time" snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1" v1 "k8s.io/api/core/v1" @@ -21,10 +22,11 @@ const ( ) type Cache struct { - lvgs sync.Map // map[string]*lvgCache - pvcLVGs sync.Map // map[string][]string - nodeLVGs sync.Map // map[string][]string - log logger.Logger + lvgs sync.Map // map[string]*lvgCache + pvcLVGs sync.Map // map[string][]string + nodeLVGs sync.Map // map[string][]string + log logger.Logger + expiredDuration time.Duration } type lvgCache struct { @@ -44,17 +46,57 @@ type pvcCache struct { // NewCache initialize new cache. func NewCache(logger logger.Logger) *Cache { - return &Cache{ - log: logger, + ch := &Cache{ + log: logger, + expiredDuration: 30 * time.Second, } + + go func() { + timer := time.NewTimer(ch.expiredDuration) + + for { + select { + case <-timer.C: + ch.clearBoundExpiredPVC() + timer.Reset(ch.expiredDuration) + } + } + }() + return ch +} + +func (c *Cache) clearBoundExpiredPVC() { + c.log.Debug("[clearBoundExpiredPVC] starts to clear expired PVC") + c.lvgs.Range(func(lvgName, _ any) bool { + pvcs, err := c.GetAllPVCForLVG(lvgName.(string)) + if err != nil { + c.log.Error(err, fmt.Sprintf("[clearBoundExpiredPVC] unable to get PVCs for the LVMVolumeGroup %s", lvgName.(string))) + return false + } + + for _, pvc := range pvcs { + if pvc.Status.Phase != v1.ClaimBound { + c.log.Trace(fmt.Sprintf("[clearBoundExpiredPVC] PVC %s is not in a Bound state", pvc.Name)) + continue + } + + if time.Now().Sub(pvc.CreationTimestamp.Time) > c.expiredDuration { + c.log.Warning(fmt.Sprintf("[clearBoundExpiredPVC] PVC %s is in a Bound state and expired, remove it from the cache", pvc.Name)) + c.RemovePVCFromTheCache(pvc) + } else { + c.log.Trace(fmt.Sprintf("[clearBoundExpiredPVC] PVC %s is in a Bound state but not expired yet.", pvc.Name)) + } + } + + return true + }) + c.log.Debug("[clearBoundExpiredPVC] finished the expired PVC clearing") } // AddLVG adds selected LVMVolumeGroup resource to the cache. If it is already stored, does nothing. 
func (c *Cache) AddLVG(lvg *snc.LVMVolumeGroup) { _, loaded := c.lvgs.LoadOrStore(lvg.Name, &lvgCache{ - lvg: lvg, - thickPVCs: sync.Map{}, - thinPools: sync.Map{}, + lvg: lvg, }) if loaded { c.log.Debug(fmt.Sprintf("[AddLVG] the LVMVolumeGroup %s has been already added to the cache", lvg.Name)) diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go index 7c6e4ea3..7d7b31f5 100644 --- a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go +++ b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go @@ -3,20 +3,18 @@ package controller import ( "context" "fmt" - "reflect" - snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1" v1 "k8s.io/api/core/v1" "k8s.io/client-go/util/workqueue" + "reflect" + "sds-local-volume-scheduler-extender/pkg/cache" + "sds-local-volume-scheduler-extender/pkg/logger" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - "sds-local-volume-scheduler-extender/pkg/cache" - "sds-local-volume-scheduler-extender/pkg/logger" ) const ( @@ -90,7 +88,6 @@ func RunLVGWatcherCacheController( log.Info(fmt.Sprintf("[RunCacheWatcherController] UpdateFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.ObjectNew.GetName())) oldLvg := e.ObjectOld newLvg := e.ObjectNew - err := cache.UpdateLVG(newLvg) if err != nil { log.Error(err, fmt.Sprintf("[RunLVGWatcherCacheController] unable to update the LVMVolumeGroup %s cache", newLvg.Name)) From bcc10bb76427cfbcd24eb55c4509b7e3dea8d5b5 Mon Sep 17 00:00:00 2001 From: Viktor Kramarenko Date: Thu, 26 Sep 2024 13:56:16 +0300 Subject: [PATCH 2/4] final refactoring Signed-off-by: Viktor Kramarenko --- .../src/cmd/main.go | 30 ++-- .../src/config/config.go | 54 ------ .../src/pkg/cache/cache.go | 95 +++++------ .../src/pkg/cache/cache_test.go | 156 ++++++++++++++++-- .../src/pkg/controller/lvg_watcher_cache.go | 8 +- 5 files changed, 214 insertions(+), 129 deletions(-) delete mode 100644 images/sds-local-volume-scheduler-extender/src/config/config.go diff --git a/images/sds-local-volume-scheduler-extender/src/cmd/main.go b/images/sds-local-volume-scheduler-extender/src/cmd/main.go index 68ced09d..0550266c 100644 --- a/images/sds-local-volume-scheduler-extender/src/cmd/main.go +++ b/images/sds-local-volume-scheduler-extender/src/cmd/main.go @@ -54,13 +54,14 @@ const ( ) type Config struct { - ListenAddr string `json:"listen"` - DefaultDivisor float64 `json:"default-divisor"` - LogLevel string `json:"log-level"` - CacheSize int `json:"cache-size"` - HealthProbeBindAddress string `json:"health-probe-bind-address"` - CertFile string `json:"cert-file"` - KeyFile string `json:"key-file"` + ListenAddr string `json:"listen"` + DefaultDivisor float64 `json:"default-divisor"` + LogLevel string `json:"log-level"` + CacheSize int `json:"cache-size"` + HealthProbeBindAddress string `json:"health-probe-bind-address"` + CertFile string `json:"cert-file"` + KeyFile string `json:"key-file"` + PVCExpiredDurationSec time.Duration `json:"pvc-expired-duration-sec"` } var cfgFilePath string @@ -73,12 +74,13 @@ var resourcesSchemeFuncs = []func(*runtime.Scheme) error{ } var config = &Config{ - ListenAddr: defaultListenAddr, - DefaultDivisor: 
defaultDivisor, - LogLevel: "2", - CacheSize: defaultCacheSize, - CertFile: defaultcertFile, - KeyFile: defaultkeyFile, + ListenAddr: defaultListenAddr, + DefaultDivisor: defaultDivisor, + LogLevel: "2", + CacheSize: defaultCacheSize, + CertFile: defaultcertFile, + KeyFile: defaultkeyFile, + PVCExpiredDurationSec: cache.DefaultPVCExpiredDurationSec * time.Second, } var rootCmd = &cobra.Command{ @@ -167,7 +169,7 @@ func subMain(ctx context.Context) error { return err } - schedulerCache := cache.NewCache(*log) + schedulerCache := cache.NewCache(*log, config.PVCExpiredDurationSec) log.Info("[subMain] scheduler cache was initialized") h, err := scheduler.NewHandler(ctx, mgr.GetClient(), *log, schedulerCache, config.DefaultDivisor) diff --git a/images/sds-local-volume-scheduler-extender/src/config/config.go b/images/sds-local-volume-scheduler-extender/src/config/config.go deleted file mode 100644 index 4d90b2aa..00000000 --- a/images/sds-local-volume-scheduler-extender/src/config/config.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2024 Flant JSC -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "os" - - "sds-local-volume-scheduler-extender/pkg/logger" -) - -const ( - NodeName = "KUBE_NODE_NAME" - LogLevel = "LOG_LEVEL" - DefaultHealthProbeBindAddressEnvName = "HEALTH_PROBE_BIND_ADDRESS" - DefaultHealthProbeBindAddress = ":8081" -) - -type Options struct { - NodeName string - Version string - Loglevel logger.Verbosity - HealthProbeBindAddress string -} - -func NewConfig() *Options { - var opts Options - - loglevel := os.Getenv(LogLevel) - if loglevel == "" { - opts.Loglevel = logger.DebugLevel - } else { - opts.Loglevel = logger.Verbosity(loglevel) - } - - opts.HealthProbeBindAddress = os.Getenv(DefaultHealthProbeBindAddressEnvName) - if opts.HealthProbeBindAddress == "" { - opts.HealthProbeBindAddress = DefaultHealthProbeBindAddress - } - - opts.Version = "dev" - - return &opts -} diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go index 60f169dc..8ca08ae6 100644 --- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go +++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go @@ -15,6 +15,8 @@ import ( ) const ( + DefaultPVCExpiredDurationSec = 30 + pvcPerLVGCount = 150 lvgsPerPVCCount = 5 lvgsPerNodeCount = 5 @@ -45,23 +47,21 @@ type pvcCache struct { } // NewCache initialize new cache. 
-func NewCache(logger logger.Logger) *Cache { +func NewCache(logger logger.Logger, pvcExpDurSec time.Duration) *Cache { ch := &Cache{ log: logger, - expiredDuration: 30 * time.Second, + expiredDuration: pvcExpDurSec, } go func() { timer := time.NewTimer(ch.expiredDuration) - for { - select { - case <-timer.C: - ch.clearBoundExpiredPVC() - timer.Reset(ch.expiredDuration) - } + for range timer.C { + ch.clearBoundExpiredPVC() + timer.Reset(ch.expiredDuration) } }() + return ch } @@ -80,7 +80,7 @@ func (c *Cache) clearBoundExpiredPVC() { continue } - if time.Now().Sub(pvc.CreationTimestamp.Time) > c.expiredDuration { + if time.Since(pvc.CreationTimestamp.Time) > c.expiredDuration { c.log.Warning(fmt.Sprintf("[clearBoundExpiredPVC] PVC %s is in a Bound state and expired, remove it from the cache", pvc.Name)) c.RemovePVCFromTheCache(pvc) } else { @@ -118,29 +118,30 @@ func (c *Cache) AddLVG(lvg *snc.LVMVolumeGroup) { // UpdateLVG updated selected LVMVolumeGroup resource in the cache. If such LVMVolumeGroup is not stored, returns an error. func (c *Cache) UpdateLVG(lvg *snc.LVMVolumeGroup) error { - if lvgCh, found := c.lvgs.Load(lvg.Name); found { - lvgCh.(*lvgCache).lvg = lvg - - c.log.Trace(fmt.Sprintf("[UpdateLVG] the LVMVolumeGroup %s nodes: %v", lvg.Name, lvg.Status.Nodes)) - for _, node := range lvg.Status.Nodes { - lvgsOnTheNode, _ := c.nodeLVGs.Load(node.Name) - if lvgsOnTheNode == nil { - lvgsOnTheNode = make([]string, 0, lvgsPerNodeCount) - } + lvgCh, found := c.lvgs.Load(lvg.Name) + if !found { + return fmt.Errorf("the LVMVolumeGroup %s was not found in the lvgCh", lvg.Name) + } - if !slices2.Contains(lvgsOnTheNode.([]string), lvg.Name) { - lvgsOnTheNode = append(lvgsOnTheNode.([]string), lvg.Name) - c.log.Debug(fmt.Sprintf("[UpdateLVG] the LVMVolumeGroup %s has been added to the node %s", lvg.Name, node.Name)) - c.nodeLVGs.Store(node.Name, lvgsOnTheNode) - } else { - c.log.Debug(fmt.Sprintf("[UpdateLVG] the LVMVolumeGroup %s has been already added to the node %s", lvg.Name, node.Name)) - } + lvgCh.(*lvgCache).lvg = lvg + + c.log.Trace(fmt.Sprintf("[UpdateLVG] the LVMVolumeGroup %s nodes: %v", lvg.Name, lvg.Status.Nodes)) + for _, node := range lvg.Status.Nodes { + lvgsOnTheNode, _ := c.nodeLVGs.Load(node.Name) + if lvgsOnTheNode == nil { + lvgsOnTheNode = make([]string, 0, lvgsPerNodeCount) } - return nil + if !slices2.Contains(lvgsOnTheNode.([]string), lvg.Name) { + lvgsOnTheNode = append(lvgsOnTheNode.([]string), lvg.Name) + c.log.Debug(fmt.Sprintf("[UpdateLVG] the LVMVolumeGroup %s has been added to the node %s", lvg.Name, node.Name)) + c.nodeLVGs.Store(node.Name, lvgsOnTheNode) + } else { + c.log.Debug(fmt.Sprintf("[UpdateLVG] the LVMVolumeGroup %s has been already added to the node %s", lvg.Name, node.Name)) + } } - return fmt.Errorf("the LVMVolumeGroup %s was not found in the lvgCh", lvg.Name) + return nil } // TryGetLVG returns selected LVMVolumeGroup resource if it is stored in the cache, otherwise returns nil. @@ -228,8 +229,9 @@ func (c *Cache) DeleteLVG(lvgName string) { c.nodeLVGs.Range(func(_, lvgNames any) bool { for i, lvg := range lvgNames.([]string) { if lvg == lvgName { - //nolint:gocritic + //nolint:gocritic,ineffassign lvgNames = append(lvgNames.([]string)[:i], lvgNames.([]string)[i+1:]...) 
+ return false } } @@ -239,8 +241,9 @@ func (c *Cache) DeleteLVG(lvgName string) { c.pvcLVGs.Range(func(_, lvgNames any) bool { for i, lvg := range lvgNames.([]string) { if lvg == lvgName { - //nolint:gocritic + //nolint:gocritic,ineffassign lvgNames = append(lvgNames.([]string)[:i], lvgNames.([]string)[i+1:]...) + return false } } @@ -687,27 +690,24 @@ func (c *Cache) RemoveSpaceReservationForPVCWithSelectedNode(pvc *v1.PersistentV // RemovePVCFromTheCache completely removes selected PVC in the cache. func (c *Cache) RemovePVCFromTheCache(pvc *v1.PersistentVolumeClaim) { - targetPvcKey := configurePVCKey(pvc) - - c.log.Debug(fmt.Sprintf("[RemovePVCFromTheCache] run full cache wipe for PVC %s", targetPvcKey)) - c.pvcLVGs.Range(func(pvcKey, lvgArray any) bool { - if pvcKey == targetPvcKey { - for _, lvgName := range lvgArray.([]string) { - lvgCh, found := c.lvgs.Load(lvgName) - if found { - lvgCh.(*lvgCache).thickPVCs.Delete(pvcKey.(string)) - lvgCh.(*lvgCache).thinPools.Range(func(_, tpCh any) bool { - tpCh.(*thinPoolCache).pvcs.Delete(pvcKey) - return true - }) - } + pvcKey := configurePVCKey(pvc) + + c.log.Debug(fmt.Sprintf("[RemovePVCFromTheCache] run full cache wipe for PVC %s", pvcKey)) + lvgSlice, ok := c.pvcLVGs.Load(pvcKey) + if ok { + for _, lvgName := range lvgSlice.([]string) { + lvgCh, found := c.lvgs.Load(lvgName) + if found { + lvgCh.(*lvgCache).thickPVCs.Delete(pvcKey) + lvgCh.(*lvgCache).thinPools.Range(func(_, tpCh any) bool { + tpCh.(*thinPoolCache).pvcs.Delete(pvcKey) + return true + }) } } + } - return true - }) - - c.pvcLVGs.Delete(targetPvcKey) + c.pvcLVGs.Delete(pvcKey) } // FindLVGForPVCBySelectedNode finds a suitable LVMVolumeGroup resource's name for selected PVC based on selected node. If no such LVMVolumeGroup found, returns empty string. 
@@ -730,6 +730,7 @@ func (c *Cache) FindLVGForPVCBySelectedNode(pvc *v1.PersistentVolumeClaim, nodeN for _, lvgName := range lvgsForPVC.([]string) { if slices2.Contains(lvgsOnTheNode.([]string), lvgName) { targetLVG = lvgName + break } } diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go index e3586bc4..41dc3f6e 100644 --- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go +++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go @@ -3,6 +3,7 @@ package cache import ( "fmt" "testing" + "time" snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1" "github.com/stretchr/testify/assert" @@ -13,8 +14,141 @@ import ( "sds-local-volume-scheduler-extender/pkg/logger" ) +func TestCache(t *testing.T) { + log := logger.Logger{} + t.Run("clearBoundExpiredPVC", func(t *testing.T) { + const ( + thickBoundExpiredPVC = "thick-bound-expired-pvc" + thickPendingExpiredPVC = "thick-pending-expired-pvc" + thickBoundNotExpiredPVC = "thick-bound-not-expired-pvc" + + thinBoundExpiredPVC = "thin-bound-expired-pvc" + thinPendingExpiredPVC = "thin-pending-expired-pvc" + thinBoundNotExpiredPVC = "thin-bound-not-expired-pvc" + ) + ch := NewCache(log, DefaultPVCExpiredDurationSec*time.Second) + expiredTime := time.Now().Add((-DefaultPVCExpiredDurationSec - 1) * time.Second) + thickPVCs := map[string]*pvcCache{ + "/" + thickBoundExpiredPVC: { + pvc: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: thickBoundExpiredPVC, + CreationTimestamp: metav1.NewTime(expiredTime), + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + }, + }, + }, + "/" + thickPendingExpiredPVC: { + pvc: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: thickPendingExpiredPVC, + CreationTimestamp: metav1.NewTime(expiredTime), + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimPending, + }, + }, + }, + "/" + thickBoundNotExpiredPVC: { + pvc: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: thickBoundNotExpiredPVC, + CreationTimestamp: metav1.NewTime(time.Now()), + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + }, + }, + }, + } + thinPVCs := map[string]*pvcCache{ + "/" + thinBoundExpiredPVC: { + pvc: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: thinBoundExpiredPVC, + CreationTimestamp: metav1.NewTime(expiredTime), + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + }, + }, + }, + "/" + thinPendingExpiredPVC: { + pvc: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: thinPendingExpiredPVC, + CreationTimestamp: metav1.NewTime(expiredTime), + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimPending, + }, + }, + }, + "/" + thinBoundNotExpiredPVC: { + pvc: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: thinBoundNotExpiredPVC, + CreationTimestamp: metav1.NewTime(time.Now()), + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + }, + }, + }, + } + + const tpName = "thin-pool" + thinPools := map[string]*thinPoolCache{ + tpName: {}, + } + for pvcName, pvc := range thinPVCs { + thinPools[tpName].pvcs.Store(pvcName, pvc) + } + + const lvgName = "lvg-name" + lvgs := map[string]*lvgCache{ + lvgName: {}, + } + + for name, pvc := range thickPVCs { + lvgs[lvgName].thickPVCs.Store(name, pvc) + } + for name, tp := range thinPools { + lvgs[lvgName].thinPools.Store(name, tp) + } + + 
ch.lvgs.Store(lvgName, lvgs[lvgName]) + ch.pvcLVGs.Store("/"+thickBoundExpiredPVC, []string{lvgName}) + ch.pvcLVGs.Store("/"+thickPendingExpiredPVC, []string{lvgName}) + ch.pvcLVGs.Store("/"+thickBoundNotExpiredPVC, []string{lvgName}) + ch.pvcLVGs.Store("/"+thinBoundExpiredPVC, []string{lvgName}) + ch.pvcLVGs.Store("/"+thinBoundNotExpiredPVC, []string{lvgName}) + ch.pvcLVGs.Store("/"+thinPendingExpiredPVC, []string{lvgName}) + + ch.clearBoundExpiredPVC() + + lvgCh, _ := ch.lvgs.Load(lvgName) + _, found := lvgCh.(*lvgCache).thickPVCs.Load("/" + thickBoundExpiredPVC) + assert.False(t, found) + _, found = lvgCh.(*lvgCache).thickPVCs.Load("/" + thickPendingExpiredPVC) + assert.True(t, found) + _, found = lvgCh.(*lvgCache).thickPVCs.Load("/" + thickBoundNotExpiredPVC) + assert.True(t, found) + + tpCh, _ := lvgCh.(*lvgCache).thinPools.Load(tpName) + _, found = tpCh.(*thinPoolCache).pvcs.Load("/" + thinBoundExpiredPVC) + assert.False(t, found) + _, found = tpCh.(*thinPoolCache).pvcs.Load("/" + thinPendingExpiredPVC) + assert.True(t, found) + _, found = tpCh.(*thinPoolCache).pvcs.Load("/" + thinBoundNotExpiredPVC) + assert.True(t, found) + }) +} + func BenchmarkCache_DeleteLVG(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) lvg := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: "first", @@ -33,7 +167,7 @@ func BenchmarkCache_DeleteLVG(b *testing.B) { } func BenchmarkCache_GetLVGReservedSpace(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) lvg := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: "first", @@ -99,7 +233,7 @@ func BenchmarkCache_GetLVGReservedSpace(b *testing.B) { } func BenchmarkCache_AddPVC(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) lvg1 := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -167,7 +301,7 @@ func BenchmarkCache_AddPVC(b *testing.B) { } func BenchmarkCache_GetAllLVG(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) lvgs := map[string]*lvgCache{ "first": { lvg: &snc.LVMVolumeGroup{ @@ -201,7 +335,7 @@ func BenchmarkCache_GetAllLVG(b *testing.B) { } func BenchmarkCache_GetLVGNamesByNodeName(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) lvgs := []string{ "first", "second", @@ -222,7 +356,7 @@ func BenchmarkCache_GetLVGNamesByNodeName(b *testing.B) { } func BenchmarkCache_TryGetLVG(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) name := "test-name" lvg := &snc.LVMVolumeGroup{ @@ -243,7 +377,7 @@ func BenchmarkCache_TryGetLVG(b *testing.B) { } func BenchmarkCache_AddLVG(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) i := 0 b.RunParallel(func(pb *testing.PB) { @@ -299,7 +433,7 @@ func BenchmarkCache_AddLVG(b *testing.B) { } func TestCache_UpdateLVG(t *testing.T) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) name := "test-lvg" lvg := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -330,7 +464,7 @@ func TestCache_UpdateLVG(t *testing.T) { } func BenchmarkCache_UpdateLVG(b *testing.B) { - cache 
:= NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) name := "test-name" i := 0 @@ -367,7 +501,7 @@ func BenchmarkCache_UpdateLVG(b *testing.B) { } func BenchmarkCache_UpdatePVC(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) i := 0 lvg := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -415,7 +549,7 @@ func BenchmarkCache_UpdatePVC(b *testing.B) { } func BenchmarkCache_FullLoad(b *testing.B) { - cache := NewCache(logger.Logger{}) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) const ( nodeName = "test-node" diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go index 7d7b31f5..7a331e31 100644 --- a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go +++ b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go @@ -3,18 +3,20 @@ package controller import ( "context" "fmt" + "reflect" + snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1" v1 "k8s.io/api/core/v1" "k8s.io/client-go/util/workqueue" - "reflect" - "sds-local-volume-scheduler-extender/pkg/cache" - "sds-local-volume-scheduler-extender/pkg/logger" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + "sds-local-volume-scheduler-extender/pkg/cache" + "sds-local-volume-scheduler-extender/pkg/logger" ) const ( From 73298ce3615850d192834ffd6ee101dffa0eb232 Mon Sep 17 00:00:00 2001 From: Viktor Kramarenko Date: Thu, 26 Sep 2024 15:54:17 +0300 Subject: [PATCH 3/4] final refactoring Signed-off-by: Viktor Kramarenko --- images/sds-local-volume-controller/src/go.mod | 2 +- images/sds-local-volume-controller/src/go.sum | 2 ++ images/sds-local-volume-csi/src/go.mod | 2 +- images/sds-local-volume-csi/src/go.sum | 2 ++ images/sds-local-volume-scheduler-extender/src/go.mod | 2 +- images/sds-local-volume-scheduler-extender/src/go.sum | 2 ++ images/webhooks/src/go.mod | 2 +- images/webhooks/src/go.sum | 2 ++ 8 files changed, 12 insertions(+), 4 deletions(-) diff --git a/images/sds-local-volume-controller/src/go.mod b/images/sds-local-volume-controller/src/go.mod index 1a41696c..7c55e112 100644 --- a/images/sds-local-volume-controller/src/go.mod +++ b/images/sds-local-volume-controller/src/go.mod @@ -4,7 +4,7 @@ go 1.22.2 require ( github.com/deckhouse/sds-local-volume/api v0.0.0-20240816081122-3de604d3d889 - github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 + github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea github.com/go-logr/logr v1.4.2 github.com/onsi/ginkgo/v2 v2.20.0 github.com/onsi/gomega v1.34.1 diff --git a/images/sds-local-volume-controller/src/go.sum b/images/sds-local-volume-controller/src/go.sum index 59738604..f8b1f80d 100644 --- a/images/sds-local-volume-controller/src/go.sum +++ b/images/sds-local-volume-controller/src/go.sum @@ -14,6 +14,8 @@ github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e7 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= 
github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 h1:HQd5YFQqoHj/CQwBKFCyuVCQmNV0PdML8QJiyDka4fQ= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea h1:RIbBqkoLvWHFNIpmq5LYObcwpRNWAEE6itzvwi/bvEQ= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= diff --git a/images/sds-local-volume-csi/src/go.mod b/images/sds-local-volume-csi/src/go.mod index 13e994c5..70d22b8a 100644 --- a/images/sds-local-volume-csi/src/go.mod +++ b/images/sds-local-volume-csi/src/go.mod @@ -5,7 +5,7 @@ go 1.22.3 require ( github.com/container-storage-interface/spec v1.10.0 github.com/deckhouse/sds-local-volume/api v0.0.0-20240903071950-ed9d3bba999b - github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 + github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea github.com/go-logr/logr v1.4.2 github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 diff --git a/images/sds-local-volume-csi/src/go.sum b/images/sds-local-volume-csi/src/go.sum index 70c0f71f..67984e4e 100644 --- a/images/sds-local-volume-csi/src/go.sum +++ b/images/sds-local-volume-csi/src/go.sum @@ -12,6 +12,8 @@ github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e7 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 h1:HQd5YFQqoHj/CQwBKFCyuVCQmNV0PdML8QJiyDka4fQ= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea h1:RIbBqkoLvWHFNIpmq5LYObcwpRNWAEE6itzvwi/bvEQ= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= diff --git a/images/sds-local-volume-scheduler-extender/src/go.mod b/images/sds-local-volume-scheduler-extender/src/go.mod index 359af893..4f9b5a60 100644 --- a/images/sds-local-volume-scheduler-extender/src/go.mod +++ b/images/sds-local-volume-scheduler-extender/src/go.mod @@ -4,7 +4,7 @@ go 1.22.2 require ( github.com/deckhouse/sds-local-volume/api v0.0.0-20240903071950-ed9d3bba999b - github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 + github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea github.com/go-logr/logr v1.4.2 github.com/go-logr/zapr v1.3.0 github.com/spf13/cobra v1.8.1 diff --git a/images/sds-local-volume-scheduler-extender/src/go.sum b/images/sds-local-volume-scheduler-extender/src/go.sum index 8f314298..534d08ed 100644 --- 
a/images/sds-local-volume-scheduler-extender/src/go.sum +++ b/images/sds-local-volume-scheduler-extender/src/go.sum @@ -15,6 +15,8 @@ github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e7 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 h1:HQd5YFQqoHj/CQwBKFCyuVCQmNV0PdML8QJiyDka4fQ= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea h1:RIbBqkoLvWHFNIpmq5LYObcwpRNWAEE6itzvwi/bvEQ= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= diff --git a/images/webhooks/src/go.mod b/images/webhooks/src/go.mod index 88004bf6..b524428b 100644 --- a/images/webhooks/src/go.mod +++ b/images/webhooks/src/go.mod @@ -4,7 +4,7 @@ go 1.22.3 require ( github.com/deckhouse/sds-local-volume/api v0.0.0-20240813100234-cf7ae5802ee1 - github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 + github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea github.com/sirupsen/logrus v1.9.3 github.com/slok/kubewebhook/v2 v2.6.0 k8s.io/api v0.30.3 diff --git a/images/webhooks/src/go.sum b/images/webhooks/src/go.sum index 9b66fbf4..2af48da2 100644 --- a/images/webhooks/src/go.sum +++ b/images/webhooks/src/go.sum @@ -9,6 +9,8 @@ github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e7 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583 h1:HQd5YFQqoHj/CQwBKFCyuVCQmNV0PdML8QJiyDka4fQ= github.com/deckhouse/sds-node-configurator/api v0.0.0-20240925090458-249de2896583/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea h1:RIbBqkoLvWHFNIpmq5LYObcwpRNWAEE6itzvwi/bvEQ= +github.com/deckhouse/sds-node-configurator/api v0.0.0-20240926063625-6815fd9556ea/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0= github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= From 956b9c4deba907699b46a2f5c4fd7a5e573e238a Mon Sep 17 00:00:00 2001 From: Viktor Kramarenko Date: Thu, 26 Sep 2024 18:44:39 +0300 Subject: [PATCH 4/4] refactoring by comments Signed-off-by: Viktor Kramarenko --- .../src/cmd/main.go | 18 +++++++------- .../src/pkg/cache/cache.go | 4 ++-- .../src/pkg/cache/cache_test.go | 24 +++++++++---------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/images/sds-local-volume-scheduler-extender/src/cmd/main.go b/images/sds-local-volume-scheduler-extender/src/cmd/main.go index 0550266c..4bf1e76c 100644 --- a/images/sds-local-volume-scheduler-extender/src/cmd/main.go +++ 
b/images/sds-local-volume-scheduler-extender/src/cmd/main.go @@ -54,14 +54,14 @@ const ( ) type Config struct { - ListenAddr string `json:"listen"` - DefaultDivisor float64 `json:"default-divisor"` - LogLevel string `json:"log-level"` - CacheSize int `json:"cache-size"` - HealthProbeBindAddress string `json:"health-probe-bind-address"` - CertFile string `json:"cert-file"` - KeyFile string `json:"key-file"` - PVCExpiredDurationSec time.Duration `json:"pvc-expired-duration-sec"` + ListenAddr string `json:"listen"` + DefaultDivisor float64 `json:"default-divisor"` + LogLevel string `json:"log-level"` + CacheSize int `json:"cache-size"` + HealthProbeBindAddress string `json:"health-probe-bind-address"` + CertFile string `json:"cert-file"` + KeyFile string `json:"key-file"` + PVCExpiredDurationSec int `json:"pvc-expired-duration-sec"` } var cfgFilePath string @@ -80,7 +80,7 @@ var config = &Config{ CacheSize: defaultCacheSize, CertFile: defaultcertFile, KeyFile: defaultkeyFile, - PVCExpiredDurationSec: cache.DefaultPVCExpiredDurationSec * time.Second, + PVCExpiredDurationSec: cache.DefaultPVCExpiredDurationSec, } var rootCmd = &cobra.Command{ diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go index 8ca08ae6..db7c121b 100644 --- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go +++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go @@ -47,10 +47,10 @@ type pvcCache struct { } // NewCache initialize new cache. -func NewCache(logger logger.Logger, pvcExpDurSec time.Duration) *Cache { +func NewCache(logger logger.Logger, pvcExpDurSec int) *Cache { ch := &Cache{ log: logger, - expiredDuration: pvcExpDurSec, + expiredDuration: time.Duration(pvcExpDurSec) * time.Second, } go func() { diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go index 41dc3f6e..9d9232be 100644 --- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go +++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go @@ -26,7 +26,7 @@ func TestCache(t *testing.T) { thinPendingExpiredPVC = "thin-pending-expired-pvc" thinBoundNotExpiredPVC = "thin-bound-not-expired-pvc" ) - ch := NewCache(log, DefaultPVCExpiredDurationSec*time.Second) + ch := NewCache(log, DefaultPVCExpiredDurationSec) expiredTime := time.Now().Add((-DefaultPVCExpiredDurationSec - 1) * time.Second) thickPVCs := map[string]*pvcCache{ "/" + thickBoundExpiredPVC: { @@ -148,7 +148,7 @@ func TestCache(t *testing.T) { } func BenchmarkCache_DeleteLVG(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) lvg := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: "first", @@ -167,7 +167,7 @@ func BenchmarkCache_DeleteLVG(b *testing.B) { } func BenchmarkCache_GetLVGReservedSpace(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) lvg := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ Name: "first", @@ -233,7 +233,7 @@ func BenchmarkCache_GetLVGReservedSpace(b *testing.B) { } func BenchmarkCache_AddPVC(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) lvg1 := &snc.LVMVolumeGroup{ 
ObjectMeta: metav1.ObjectMeta{ @@ -301,7 +301,7 @@ func BenchmarkCache_AddPVC(b *testing.B) { } func BenchmarkCache_GetAllLVG(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) lvgs := map[string]*lvgCache{ "first": { lvg: &snc.LVMVolumeGroup{ @@ -335,7 +335,7 @@ func BenchmarkCache_GetAllLVG(b *testing.B) { } func BenchmarkCache_GetLVGNamesByNodeName(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) lvgs := []string{ "first", "second", @@ -356,7 +356,7 @@ func BenchmarkCache_GetLVGNamesByNodeName(b *testing.B) { } func BenchmarkCache_TryGetLVG(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) name := "test-name" lvg := &snc.LVMVolumeGroup{ @@ -377,7 +377,7 @@ func BenchmarkCache_TryGetLVG(b *testing.B) { } func BenchmarkCache_AddLVG(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) i := 0 b.RunParallel(func(pb *testing.PB) { @@ -433,7 +433,7 @@ func BenchmarkCache_AddLVG(b *testing.B) { } func TestCache_UpdateLVG(t *testing.T) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) name := "test-lvg" lvg := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -464,7 +464,7 @@ func TestCache_UpdateLVG(t *testing.T) { } func BenchmarkCache_UpdateLVG(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) name := "test-name" i := 0 @@ -501,7 +501,7 @@ func BenchmarkCache_UpdateLVG(b *testing.B) { } func BenchmarkCache_UpdatePVC(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) i := 0 lvg := &snc.LVMVolumeGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -549,7 +549,7 @@ func BenchmarkCache_UpdatePVC(b *testing.B) { } func BenchmarkCache_FullLoad(b *testing.B) { - cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec*time.Second) + cache := NewCache(logger.Logger{}, DefaultPVCExpiredDurationSec) const ( nodeName = "test-node"
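
Reviewer note on the series, not part of any patch:

Patch 4 changes Config.PVCExpiredDurationSec from time.Duration to a plain int and converts it inside NewCache with time.Duration(pvcExpDurSec) * time.Second. That sidesteps a classic pitfall: time.Duration counts nanoseconds, so a bare 30 decoded straight into a time.Duration field would mean 30 ns rather than 30 s.

The cleanup goroutine settles on "for range timer.C" with an explicit timer.Reset after every pass. For fixed-interval work, time.Ticker expresses the same loop without the manual reset and makes a shutdown path easy to add. Below is a minimal standalone sketch, not code from the series: the startCleanupLoop helper and the context-based stop are illustrative assumptions (the goroutine in the patch runs for the process lifetime and has no stop channel).

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // startCleanupLoop invokes fn every interval until ctx is cancelled.
    // A Ticker fires repeatedly on its own, so no Reset is needed, and
    // the deferred Stop releases the ticker when the goroutine exits.
    func startCleanupLoop(ctx context.Context, interval time.Duration, fn func()) {
        go func() {
            ticker := time.NewTicker(interval)
            defer ticker.Stop()
            for {
                select {
                case <-ticker.C:
                    fn() // e.g. the cache's clearBoundExpiredPVC pass
                case <-ctx.Done():
                    return
                }
            }
        }()
    }

    func main() {
        // Stand-in for the scheduler extender's lifetime: run the cleanup
        // a few times, then cancel and let the goroutine exit cleanly.
        ctx, cancel := context.WithTimeout(context.Background(), 3500*time.Millisecond)
        defer cancel()
        startCleanupLoop(ctx, time.Second, func() { fmt.Println("clearing expired PVCs") })
        <-ctx.Done()
    }

One behavioral difference worth knowing: Reset after the work measures the next interval from the end of each pass, while a Ticker keeps wall-clock cadence and drops ticks if a pass runs long. For a 30-second expiry sweep either is fine; the Ticker variant mainly buys a clean exit path if the cache ever needs to be torn down, for example in tests.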