fixed linter issues for sds-local-volume-scheduler
Signed-off-by: Viktor Kramarenko <viktor.kramarenko@flant.com>
ViktorKram committed Aug 8, 2024
1 parent 9399f6b commit a717712
Showing 14 changed files with 83 additions and 77 deletions.
@@ -52,7 +52,7 @@ func TestAccessLogHandler(t *testing.T) {
ctrl.SetLogger(zapr.NewLogger(zap.New(observer)))

mux := http.NewServeMux()
mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
mux.HandleFunc("/hello", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("hello"))
})
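
Most of the diffs in this commit apply the same unused-parameter fix shown above: a closure parameter that is never read is renamed to _, which satisfies checks such as revive's unused-parameter rule without changing the function's signature. A minimal, self-contained sketch of the pattern (hypothetical route and port, not this service's wiring):

package main

import (
    "fmt"
    "net/http"
)

func main() {
    mux := http.NewServeMux()

    // The *http.Request is never read inside the handler, so it is named "_"
    // to keep the http.HandlerFunc signature while silencing the linter.
    mux.HandleFunc("/hello", func(w http.ResponseWriter, _ *http.Request) {
        w.WriteHeader(http.StatusOK)
        fmt.Fprint(w, "hello")
    })

    // Hypothetical listen address for the sketch.
    _ = http.ListenAndServe(":8080", mux)
}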
22 changes: 11 additions & 11 deletions images/sds-local-volume-scheduler-extender/src/cmd/cmd/root.go
@@ -20,28 +20,28 @@ import (
"context"
"errors"
"fmt"
slv "github.com/deckhouse/sds-local-volume/api/v1alpha1"
snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
"net/http"
"os"
"os/signal"
"sds-local-volume-scheduler-extender/pkg/cache"
"sds-local-volume-scheduler-extender/pkg/controller"
"sds-local-volume-scheduler-extender/pkg/kubutils"
"sds-local-volume-scheduler-extender/pkg/logger"
"sds-local-volume-scheduler-extender/pkg/scheduler"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sync"
"syscall"
"time"

slv "github.com/deckhouse/sds-local-volume/api/v1alpha1"
snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
sv1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime"
apiruntime "k8s.io/apimachinery/pkg/runtime"
"sds-local-volume-scheduler-extender/pkg/cache"
"sds-local-volume-scheduler-extender/pkg/controller"
"sds-local-volume-scheduler-extender/pkg/kubutils"
"sds-local-volume-scheduler-extender/pkg/logger"
"sds-local-volume-scheduler-extender/pkg/scheduler"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/yaml"
)
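
The import block above is regrouped rather than changed: goimports/gci-style linters expect blank-line-separated groups, standard library first, then external modules, then the project's own packages. A small compilable sketch of the same layout (trimmed to a couple of real imports; the local group is shown only as a comment):

package main

import (
    // Standard library.
    "fmt"
    "os"

    // External dependencies.
    "github.com/spf13/cobra"
    // Project-local packages would form a third group, e.g.
    // "sds-local-volume-scheduler-extender/pkg/logger".
)

func main() {
    cmd := &cobra.Command{
        Use: "example",
        RunE: func(_ *cobra.Command, _ []string) error {
            fmt.Println("import grouping only changes layout, not behavior")
            return nil
        },
    }
    if err := cmd.Execute(); err != nil {
        os.Exit(1)
    }
}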

@@ -88,7 +88,7 @@ It scores nodes with this formula:
min(10, max(0, log2(capacity >> 30 / divisor)))
The default divisor is 1. It can be changed with a command-line option.
`,
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, _ []string) error {
cmd.SilenceUsage = true
return subMain(cmd.Context())
},
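
The command help quoted above defines the extender's scoring: free capacity is shifted down to GiB, divided by the configurable divisor, and the base-2 logarithm is clamped to the 0-10 range. A standalone sketch of that arithmetic (illustrative only, not the extender's actual scoring code):

package main

import (
    "fmt"
    "math"
)

// score computes min(10, max(0, log2(capacity >> 30 / divisor))) from the
// help text. capacityBytes is in bytes; the divisor defaults to 1.
func score(capacityBytes int64, divisor float64) int {
    gib := float64(capacityBytes >> 30) // shifting by 30 converts bytes to GiB
    if gib <= 0 || divisor <= 0 {
        return 0
    }
    s := math.Log2(gib / divisor)
    return int(math.Min(10, math.Max(0, s)))
}

func main() {
    for _, capacity := range []int64{1 << 30, 100 << 30, 10 << 40} {
        fmt.Printf("capacity=%d bytes -> score=%d\n", capacity, score(capacity, 1))
    }
}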
@@ -109,7 +109,7 @@ func subMain(parentCtx context.Context) error {
ctx := context.Background()
log, err := logger.NewLogger(logger.Verbosity(config.LogLevel))
if err != nil {
fmt.Println(fmt.Sprintf("[subMain] unable to initialize logger, err: %s", err.Error()))
fmt.Printf("[subMain] unable to initialize logger, err: %s\n", err.Error())
}
log.Info(fmt.Sprintf("[subMain] logger has been initialized, log level: %s", config.LogLevel))
ctrl.SetLogger(log.GetLogger())
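
The logging change above is the simplification staticcheck reports as S1038: fmt.Println(fmt.Sprintf(...)) formats a string only to print it with a second call, so a single fmt.Printf with an explicit trailing newline produces the same output. A trivial before/after sketch:

package main

import (
    "errors"
    "fmt"
)

func main() {
    err := errors.New("boom")

    // Flagged form: two calls where one is enough.
    fmt.Println(fmt.Sprintf("[subMain] unable to initialize logger, err: %s", err.Error()))

    // Preferred form: one call, newline added explicitly.
    fmt.Printf("[subMain] unable to initialize logger, err: %s\n", err.Error())
}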
@@ -15,6 +15,7 @@ package config

import (
"os"

"sds-local-volume-scheduler-extender/pkg/logger"
)

45 changes: 24 additions & 21 deletions images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go
@@ -3,12 +3,13 @@ package cache
import (
"errors"
"fmt"
"sync"

snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
v1 "k8s.io/api/core/v1"
slices2 "k8s.io/utils/strings/slices"
"sds-local-volume-scheduler-extender/pkg/consts"
"sds-local-volume-scheduler-extender/pkg/logger"
"sync"
)

const (
@@ -19,20 +20,20 @@ const (
)

type Cache struct {
lvgs sync.Map //map[string]*lvgCache
pvcLVGs sync.Map //map[string][]string
nodeLVGs sync.Map //map[string][]string
lvgs sync.Map // map[string]*lvgCache
pvcLVGs sync.Map // map[string][]string
nodeLVGs sync.Map // map[string][]string
log logger.Logger
}

type lvgCache struct {
lvg *snc.LvmVolumeGroup
thickPVCs sync.Map //map[string]*pvcCache
thinPools sync.Map //map[string]*thinPoolCache
thickPVCs sync.Map // map[string]*pvcCache
thinPools sync.Map // map[string]*thinPoolCache
}

type thinPoolCache struct {
pvcs sync.Map //map[string]*pvcCache
pvcs sync.Map // map[string]*pvcCache
}

type pvcCache struct {
@@ -126,7 +127,7 @@ func (c *Cache) GetAllLVG() map[string]*snc.LvmVolumeGroup {
lvgs := make(map[string]*snc.LvmVolumeGroup)
c.lvgs.Range(func(lvgName, lvgCh any) bool {
if lvgCh.(*lvgCache).lvg == nil {
c.log.Error(fmt.Errorf("LVMVolumeGroup %s is not initialized", lvgName), fmt.Sprintf("[GetAllLVG] an error occurs while iterating the LVMVolumeGroups"))
c.log.Error(fmt.Errorf("LVMVolumeGroup %s is not initialized", lvgName), "[GetAllLVG] an error occurs while iterating the LVMVolumeGroups")
return true
}

@@ -146,7 +147,7 @@ func (c *Cache) GetLVGThickReservedSpace(lvgName string) (int64, error) {
}

var space int64
lvg.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool {
lvg.(*lvgCache).thickPVCs.Range(func(_, pvcCh any) bool {
space += pvcCh.(*pvcCache).pvc.Spec.Resources.Requests.Storage().Value()
return true
})
@@ -169,7 +170,7 @@ func (c *Cache) GetLVGThinReservedSpace(lvgName string, thinPoolName string) (in
}

var space int64
thinPool.(*thinPoolCache).pvcs.Range(func(pvcName, pvcCh any) bool {
thinPool.(*thinPoolCache).pvcs.Range(func(_, pvcCh any) bool {
space += pvcCh.(*pvcCache).pvc.Spec.Resources.Requests.Storage().Value()
return true
})
@@ -181,19 +182,21 @@ func (c *Cache) GetLVGThinReservedSpace(lvgName string, thinPoolName string) (in
func (c *Cache) DeleteLVG(lvgName string) {
c.lvgs.Delete(lvgName)

c.nodeLVGs.Range(func(nodeName, lvgNames any) bool {
c.nodeLVGs.Range(func(_, lvgNames any) bool {
for i, lvg := range lvgNames.([]string) {
if lvg == lvgName {
//nolint:gocritic
lvgNames = append(lvgNames.([]string)[:i], lvgNames.([]string)[i+1:]...)
}
}

return true
})

c.pvcLVGs.Range(func(pvcName, lvgNames any) bool {
c.pvcLVGs.Range(func(_, lvgNames any) bool {
for i, lvg := range lvgNames.([]string) {
if lvg == lvgName {
//nolint:gocritic
lvgNames = append(lvgNames.([]string)[:i], lvgNames.([]string)[i+1:]...)
}
}
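
The //nolint:gocritic annotations above cover the usual append-based removal of an element from a slice, which gocritic flags because the append result is assigned to a different variable than the one being sliced. A short sketch of the underlying pattern on a plain []string (hypothetical names):

package main

import "fmt"

// removeFirst drops the first occurrence of target from names, reusing the
// backing array, mirroring how the cache filters an LVG name out of a slice.
func removeFirst(names []string, target string) []string {
    for i, n := range names {
        if n == target {
            return append(names[:i], names[i+1:]...)
        }
    }
    return names
}

func main() {
    lvgs := []string{"lvg-a", "lvg-b", "lvg-c"}
    fmt.Println(removeFirst(lvgs, "lvg-b")) // [lvg-a lvg-c]
}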
@@ -468,12 +471,12 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er

// TODO: fix this to struct size field after refactoring
size := 0
lvgCh.(*lvgCache).thickPVCs.Range(func(key, value any) bool {
lvgCh.(*lvgCache).thickPVCs.Range(func(_, _ any) bool {
size++
return true
})
lvgCh.(*lvgCache).thinPools.Range(func(tpName, tpCh any) bool {
tpCh.(*thinPoolCache).pvcs.Range(func(key, value any) bool {
lvgCh.(*lvgCache).thinPools.Range(func(_, tpCh any) bool {
tpCh.(*thinPoolCache).pvcs.Range(func(_, _ any) bool {
size++
return true
})
@@ -482,14 +485,14 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er

result := make([]*v1.PersistentVolumeClaim, 0, size)
// collect Thick PVC for the LVG
lvgCh.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool {
lvgCh.(*lvgCache).thickPVCs.Range(func(_, pvcCh any) bool {
result = append(result, pvcCh.(*pvcCache).pvc)
return true
})

// collect Thin PVC for the LVG
lvgCh.(*lvgCache).thinPools.Range(func(tpName, tpCh any) bool {
tpCh.(*thinPoolCache).pvcs.Range(func(pvcName, pvcCh any) bool {
lvgCh.(*lvgCache).thinPools.Range(func(_, tpCh any) bool {
tpCh.(*thinPoolCache).pvcs.Range(func(_, pvcCh any) bool {
result = append(result, pvcCh.(*pvcCache).pvc)
return true
})
@@ -510,7 +513,7 @@ func (c *Cache) GetAllThickPVCLVG(lvgName string) ([]*v1.PersistentVolumeClaim,

result := make([]*v1.PersistentVolumeClaim, 0, pvcPerLVGCount)
// collect Thick PVC for the LVG
lvgCh.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool {
lvgCh.(*lvgCache).thickPVCs.Range(func(_, pvcCh any) bool {
result = append(result, pvcCh.(*pvcCache).pvc)
return true
})
@@ -534,7 +537,7 @@ func (c *Cache) GetAllPVCFromLVGThinPool(lvgName, thinPoolName string) ([]*v1.Pe
}

result := make([]*v1.PersistentVolumeClaim, 0, pvcPerLVGCount)
thinPoolCh.(*thinPoolCache).pvcs.Range(func(pvcName, pvcCh any) bool {
thinPoolCh.(*thinPoolCache).pvcs.Range(func(_, pvcCh any) bool {
result = append(result, pvcCh.(*pvcCache).pvc)
return true
})
@@ -650,7 +653,7 @@ func (c *Cache) RemovePVCFromTheCache(pvc *v1.PersistentVolumeClaim) {
lvgCh, found := c.lvgs.Load(lvgName)
if found {
lvgCh.(*lvgCache).thickPVCs.Delete(pvcKey.(string))
lvgCh.(*lvgCache).thinPools.Range(func(tpName, tpCh any) bool {
lvgCh.(*lvgCache).thinPools.Range(func(_, tpCh any) bool {
tpCh.(*thinPoolCache).pvcs.Delete(pvcKey)
return true
})
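
RemovePVCFromTheCache above walks every thin pool of the LVMVolumeGroup and deletes the PVC key from each nested sync.Map. A self-contained sketch of that Range-and-Delete idiom, with hypothetical keys instead of the scheduler's real cache types:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var thinPools sync.Map // logically map[string]*sync.Map: thin pool name -> PVC set

    pvcs := &sync.Map{}
    pvcs.Store("default/my-pvc", struct{}{})
    thinPools.Store("thin-pool-1", pvcs)

    // Remove the PVC key from every thin pool, as the cache does on PVC deletion.
    pvcKey := "default/my-pvc"
    thinPools.Range(func(_, tp any) bool {
        tp.(*sync.Map).Delete(pvcKey)
        return true // keep iterating over the remaining pools
    })

    if _, found := pvcs.Load(pvcKey); !found {
        fmt.Println("PVC removed from all thin pools")
    }
}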
@@ -2,13 +2,14 @@ package cache

import (
"fmt"
"testing"

snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sds-local-volume-scheduler-extender/pkg/logger"
"testing"
)

func BenchmarkCache_DeleteLVG(b *testing.B) {
@@ -23,7 +24,7 @@ func BenchmarkCache_DeleteLVG(b *testing.B) {
for pb.Next() {
cache.AddLVG(lvg)
if _, found := cache.lvgs.Load(lvg.Name); found {
//b.Log("lvg found, delete it")
// b.Log("lvg found, delete it")
cache.DeleteLVG(lvg.Name)
}
}
@@ -4,10 +4,11 @@ import (
"context"
"errors"
"fmt"
"reflect"

snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/workqueue"
"reflect"
"sds-local-volume-scheduler-extender/pkg/cache"
"sds-local-volume-scheduler-extender/pkg/logger"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -30,7 +31,7 @@ func RunLVGWatcherCacheController(
log.Info("[RunLVGWatcherCacheController] starts the work")

c, err := controller.New(LVGWatcherCacheCtrlName, mgr, controller.Options{
Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
Reconciler: reconcile.Func(func(_ context.Context, _ reconcile.Request) (reconcile.Result, error) {
return reconcile.Result{}, nil
}),
})
@@ -40,7 +41,7 @@
}

err = c.Watch(source.Kind(mgr.GetCache(), &snc.LvmVolumeGroup{}), handler.Funcs{
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
CreateFunc: func(_ context.Context, e event.CreateEvent, _ workqueue.RateLimitingInterface) {
log.Info(fmt.Sprintf("[RunLVGWatcherCacheController] CreateFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.Object.GetName()))

lvg, ok := e.Object.(*snc.LvmVolumeGroup)
@@ -90,7 +91,7 @@ func RunLVGWatcherCacheController(

log.Info(fmt.Sprintf("[RunLVGWatcherCacheController] cache for the LVMVolumeGroup %s was reconciled by CreateFunc", lvg.Name))
},
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
UpdateFunc: func(_ context.Context, e event.UpdateEvent, _ workqueue.RateLimitingInterface) {
log.Info(fmt.Sprintf("[RunCacheWatcherController] UpdateFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
oldLvg, ok := e.ObjectOld.(*snc.LvmVolumeGroup)
if !ok {
@@ -140,7 +141,7 @@ func RunLVGWatcherCacheController(

log.Debug(fmt.Sprintf("[RunLVGWatcherCacheController] Update Func ends reconciliation the LVMVolumeGroup %s cache", newLvg.Name))
},
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
DeleteFunc: func(_ context.Context, e event.DeleteEvent, _ workqueue.RateLimitingInterface) {
log.Info(fmt.Sprintf("[RunCacheWatcherController] DeleteFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.Object.GetName()))
lvg, ok := e.Object.(*snc.LvmVolumeGroup)
if !ok {
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"

v1 "k8s.io/api/core/v1"
v12 "k8s.io/api/storage/v1"
"k8s.io/client-go/util/workqueue"
@@ -33,7 +34,7 @@ func RunPVCWatcherCacheController(
log.Info("[RunPVCWatcherCacheController] starts the work")

c, err := controller.New("test-pvc-watcher", mgr, controller.Options{
Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
Reconciler: reconcile.Func(func(_ context.Context, _ reconcile.Request) (reconcile.Result, error) {
return reconcile.Result{}, nil
}),
})
@@ -43,7 +44,7 @@
}

err = c.Watch(source.Kind(mgr.GetCache(), &v1.PersistentVolumeClaim{}), handler.Funcs{
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
CreateFunc: func(ctx context.Context, e event.CreateEvent, _ workqueue.RateLimitingInterface) {
log.Info("[RunPVCWatcherCacheController] CreateFunc reconciliation starts")
pvc, ok := e.Object.(*v1.PersistentVolumeClaim)
if !ok {
@@ -70,7 +71,7 @@
reconcilePVC(ctx, mgr, log, schedulerCache, pvc, selectedNodeName)
log.Info("[RunPVCWatcherCacheController] CreateFunc reconciliation ends")
},
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, _ workqueue.RateLimitingInterface) {
log.Info("[RunPVCWatcherCacheController] Update Func reconciliation starts")
pvc, ok := e.ObjectNew.(*v1.PersistentVolumeClaim)
if !ok {
@@ -95,7 +96,7 @@
reconcilePVC(ctx, mgr, log, schedulerCache, pvc, selectedNodeName)
log.Info("[RunPVCWatcherCacheController] Update Func reconciliation ends")
},
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
DeleteFunc: func(_ context.Context, e event.DeleteEvent, _ workqueue.RateLimitingInterface) {
log.Info("[RunPVCWatcherCacheController] Delete Func reconciliation starts")
pvc, ok := e.Object.(*v1.PersistentVolumeClaim)
if !ok {
@@ -140,7 +141,7 @@ func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, s
log.Debug(fmt.Sprintf("[reconcilePVC] successfully extracted LVGs from the Storage Class %s for PVC %s/%s", sc.Name, pvc.Namespace, pvc.Name))

lvgsForPVC := schedulerCache.GetLVGNamesForPVC(pvc)
if lvgsForPVC == nil || len(lvgsForPVC) == 0 {
if len(lvgsForPVC) == 0 {
log.Debug(fmt.Sprintf("[reconcilePVC] no LVMVolumeGroups were found in the cache for PVC %s/%s. Use Storage Class %s instead", pvc.Namespace, pvc.Name, *pvc.Spec.StorageClassName))

for _, lvg := range lvgsFromSc {
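The simplified condition above relies on len of a nil slice being 0 in Go, so the explicit nil check in lvgsForPVC == nil || len(lvgsForPVC) == 0 is redundant (staticcheck's S1009). A one-file demonstration:

package main

import "fmt"

func main() {
    var lvgsForPVC []string // nil slice, never allocated

    fmt.Println(lvgsForPVC == nil)    // true
    fmt.Println(len(lvgsForPVC) == 0) // also true: len of a nil slice is 0
}
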
@@ -15,12 +15,12 @@ package kubutils

import (
"fmt"

"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)

func KubernetesDefaultConfigCreate() (*rest.Config, error) {
//todo validate empty
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
clientcmd.NewDefaultClientConfigLoadingRules(),
&clientcmd.ConfigOverrides{},
@@ -14,11 +14,11 @@ limitations under the License.
package logger

import (
"flag"
"fmt"
"strconv"

"github.com/go-logr/logr"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
"k8s.io/klog/v2/textlogger"
)

const (
@@ -47,13 +47,12 @@ type Logger struct {
}

func NewLogger(level Verbosity) (*Logger, error) {
klog.InitFlags(nil)
if err := flag.Set("v", string(level)); err != nil {
v, err := strconv.Atoi(string(level))
if err != nil {
return nil, err
}
flag.Parse()

log := klogr.New().WithCallDepth(1)
log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1)

return &Logger{log: log}, nil
}
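
NewLogger above now parses the verbosity string itself and builds a logger through k8s.io/klog/v2/textlogger instead of calling klog.InitFlags plus the deprecated klogr constructor. A minimal usage sketch of the same textlogger API, assuming the standard logr methods rather than this repo's Logger wrapper:

package main

import (
    "k8s.io/klog/v2/textlogger"
)

func main() {
    // Verbosity 4 is an arbitrary example level for the sketch.
    log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4)))

    log.Info("logger initialized", "level", 4)
    log.V(2).Info("printed only when verbosity >= 2")
    log.Error(nil, "something went wrong", "component", "example")
}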
