diff --git a/images/sds-local-volume-scheduler-extender/src/cmd/cmd/access_log_test.go b/images/sds-local-volume-scheduler-extender/src/cmd/cmd/access_log_test.go
index b19b40b8..c4f64a95 100644
--- a/images/sds-local-volume-scheduler-extender/src/cmd/cmd/access_log_test.go
+++ b/images/sds-local-volume-scheduler-extender/src/cmd/cmd/access_log_test.go
@@ -52,7 +52,7 @@ func TestAccessLogHandler(t *testing.T) {
     ctrl.SetLogger(zapr.NewLogger(zap.New(observer)))
 
     mux := http.NewServeMux()
-    mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
+    mux.HandleFunc("/hello", func(w http.ResponseWriter, _ *http.Request) {
         w.WriteHeader(http.StatusOK)
         w.Write([]byte("hello"))
     })
diff --git a/images/sds-local-volume-scheduler-extender/src/cmd/cmd/root.go b/images/sds-local-volume-scheduler-extender/src/cmd/cmd/root.go
index 64986e5c..35baf586 100644
--- a/images/sds-local-volume-scheduler-extender/src/cmd/cmd/root.go
+++ b/images/sds-local-volume-scheduler-extender/src/cmd/cmd/root.go
@@ -20,28 +20,28 @@ import (
     "context"
     "errors"
     "fmt"
-    slv "github.com/deckhouse/sds-local-volume/api/v1alpha1"
-    snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
     "net/http"
     "os"
     "os/signal"
-    "sds-local-volume-scheduler-extender/pkg/cache"
-    "sds-local-volume-scheduler-extender/pkg/controller"
-    "sds-local-volume-scheduler-extender/pkg/kubutils"
-    "sds-local-volume-scheduler-extender/pkg/logger"
-    "sds-local-volume-scheduler-extender/pkg/scheduler"
-    "sigs.k8s.io/controller-runtime/pkg/healthz"
-    "sigs.k8s.io/controller-runtime/pkg/manager"
     "sync"
     "syscall"
     "time"
 
+    slv "github.com/deckhouse/sds-local-volume/api/v1alpha1"
+    snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
     "github.com/spf13/cobra"
     v1 "k8s.io/api/core/v1"
     sv1 "k8s.io/api/storage/v1"
     "k8s.io/apimachinery/pkg/runtime"
     apiruntime "k8s.io/apimachinery/pkg/runtime"
+    "sds-local-volume-scheduler-extender/pkg/cache"
+    "sds-local-volume-scheduler-extender/pkg/controller"
+    "sds-local-volume-scheduler-extender/pkg/kubutils"
+    "sds-local-volume-scheduler-extender/pkg/logger"
+    "sds-local-volume-scheduler-extender/pkg/scheduler"
     ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/healthz"
+    "sigs.k8s.io/controller-runtime/pkg/manager"
     "sigs.k8s.io/yaml"
 )
@@ -88,7 +88,7 @@ It scores nodes with this formula:
     min(10, max(0, log2(capacity >> 30 / divisor)))
 The default divisor is 1. It can be changed with a command-line option.
 `,
-    RunE: func(cmd *cobra.Command, args []string) error {
+    RunE: func(cmd *cobra.Command, _ []string) error {
         cmd.SilenceUsage = true
         return subMain(cmd.Context())
     },
@@ -109,7 +109,7 @@ func subMain(parentCtx context.Context) error {
     ctx := context.Background()
     log, err := logger.NewLogger(logger.Verbosity(config.LogLevel))
     if err != nil {
-        fmt.Println(fmt.Sprintf("[subMain] unable to initialize logger, err: %s", err.Error()))
+        fmt.Printf("[subMain] unable to initialize logger, err: %s\n", err.Error())
     }
     log.Info(fmt.Sprintf("[subMain] logger has been initialized, log level: %s", config.LogLevel))
     ctrl.SetLogger(log.GetLogger())
diff --git a/images/sds-local-volume-scheduler-extender/src/config/config.go b/images/sds-local-volume-scheduler-extender/src/config/config.go
index 7b0fdf71..4d90b2aa 100644
--- a/images/sds-local-volume-scheduler-extender/src/config/config.go
+++ b/images/sds-local-volume-scheduler-extender/src/config/config.go
@@ -15,6 +15,7 @@ package config
 import (
     "os"
 
+    "sds-local-volume-scheduler-extender/pkg/logger"
 )
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go
index 9668e3b8..e1649a7e 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go
@@ -3,12 +3,13 @@ package cache
 import (
     "errors"
     "fmt"
+    "sync"
+
     snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
     v1 "k8s.io/api/core/v1"
     slices2 "k8s.io/utils/strings/slices"
     "sds-local-volume-scheduler-extender/pkg/consts"
     "sds-local-volume-scheduler-extender/pkg/logger"
-    "sync"
 )
@@ -19,20 +20,20 @@ const (
 )
 
 type Cache struct {
-    lvgs     sync.Map //map[string]*lvgCache
-    pvcLVGs  sync.Map //map[string][]string
-    nodeLVGs sync.Map //map[string][]string
+    lvgs     sync.Map // map[string]*lvgCache
+    pvcLVGs  sync.Map // map[string][]string
+    nodeLVGs sync.Map // map[string][]string
     log      logger.Logger
 }
 
 type lvgCache struct {
     lvg       *snc.LvmVolumeGroup
-    thickPVCs sync.Map //map[string]*pvcCache
-    thinPools sync.Map //map[string]*thinPoolCache
+    thickPVCs sync.Map // map[string]*pvcCache
+    thinPools sync.Map // map[string]*thinPoolCache
 }
 
 type thinPoolCache struct {
-    pvcs sync.Map //map[string]*pvcCache
+    pvcs sync.Map // map[string]*pvcCache
 }
 
 type pvcCache struct {
@@ -126,7 +127,7 @@ func (c *Cache) GetAllLVG() map[string]*snc.LvmVolumeGroup {
     lvgs := make(map[string]*snc.LvmVolumeGroup)
     c.lvgs.Range(func(lvgName, lvgCh any) bool {
         if lvgCh.(*lvgCache).lvg == nil {
-            c.log.Error(fmt.Errorf("LVMVolumeGroup %s is not initialized", lvgName), fmt.Sprintf("[GetAllLVG] an error occurs while iterating the LVMVolumeGroups"))
+            c.log.Error(fmt.Errorf("LVMVolumeGroup %s is not initialized", lvgName), "[GetAllLVG] an error occurs while iterating the LVMVolumeGroups")
             return true
         }
@@ -146,7 +147,7 @@ func (c *Cache) GetLVGThickReservedSpace(lvgName string) (int64, error) {
     }
 
     var space int64
-    lvg.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool {
+    lvg.(*lvgCache).thickPVCs.Range(func(_, pvcCh any) bool {
         space += pvcCh.(*pvcCache).pvc.Spec.Resources.Requests.Storage().Value()
         return true
     })
@@ -169,7 +170,7 @@ func (c *Cache) GetLVGThinReservedSpace(lvgName string, thinPoolName string) (in
     }
 
     var space int64
-    thinPool.(*thinPoolCache).pvcs.Range(func(pvcName, pvcCh any) bool {
+    thinPool.(*thinPoolCache).pvcs.Range(func(_, pvcCh any) bool {
         space += pvcCh.(*pvcCache).pvc.Spec.Resources.Requests.Storage().Value()
         return true
     })
@@ -181,9 +182,10 @@ func (c *Cache) GetLVGThinReservedSpace(lvgName string, thinPoolName string) (in
 func (c *Cache) DeleteLVG(lvgName string) {
     c.lvgs.Delete(lvgName)
 
-    c.nodeLVGs.Range(func(nodeName, lvgNames any) bool {
+    c.nodeLVGs.Range(func(_, lvgNames any) bool {
         for i, lvg := range lvgNames.([]string) {
             if lvg == lvgName {
+                //nolint:gocritic
                 lvgNames = append(lvgNames.([]string)[:i], lvgNames.([]string)[i+1:]...)
             }
         }
@@ -191,9 +193,10 @@ func (c *Cache) DeleteLVG(lvgName string) {
         return true
     })
 
-    c.pvcLVGs.Range(func(pvcName, lvgNames any) bool {
+    c.pvcLVGs.Range(func(_, lvgNames any) bool {
         for i, lvg := range lvgNames.([]string) {
             if lvg == lvgName {
+                //nolint:gocritic
                 lvgNames = append(lvgNames.([]string)[:i], lvgNames.([]string)[i+1:]...)
             }
         }
@@ -468,12 +471,12 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er
     // TODO: fix this to struct size field after refactoring
     size := 0
-    lvgCh.(*lvgCache).thickPVCs.Range(func(key, value any) bool {
+    lvgCh.(*lvgCache).thickPVCs.Range(func(_, _ any) bool {
         size++
         return true
     })
-    lvgCh.(*lvgCache).thinPools.Range(func(tpName, tpCh any) bool {
-        tpCh.(*thinPoolCache).pvcs.Range(func(key, value any) bool {
+    lvgCh.(*lvgCache).thinPools.Range(func(_, tpCh any) bool {
+        tpCh.(*thinPoolCache).pvcs.Range(func(_, _ any) bool {
             size++
             return true
         })
@@ -482,14 +485,14 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er
     result := make([]*v1.PersistentVolumeClaim, 0, size)
 
     // collect Thick PVC for the LVG
-    lvgCh.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool {
+    lvgCh.(*lvgCache).thickPVCs.Range(func(_, pvcCh any) bool {
         result = append(result, pvcCh.(*pvcCache).pvc)
         return true
     })
 
     // collect Thin PVC for the LVG
-    lvgCh.(*lvgCache).thinPools.Range(func(tpName, tpCh any) bool {
-        tpCh.(*thinPoolCache).pvcs.Range(func(pvcName, pvcCh any) bool {
+    lvgCh.(*lvgCache).thinPools.Range(func(_, tpCh any) bool {
+        tpCh.(*thinPoolCache).pvcs.Range(func(_, pvcCh any) bool {
             result = append(result, pvcCh.(*pvcCache).pvc)
             return true
         })
@@ -510,7 +513,7 @@ func (c *Cache) GetAllThickPVCLVG(lvgName string) ([]*v1.PersistentVolumeClaim,
     result := make([]*v1.PersistentVolumeClaim, 0, pvcPerLVGCount)
 
     // collect Thick PVC for the LVG
-    lvgCh.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool {
+    lvgCh.(*lvgCache).thickPVCs.Range(func(_, pvcCh any) bool {
         result = append(result, pvcCh.(*pvcCache).pvc)
         return true
     })
@@ -534,7 +537,7 @@ func (c *Cache) GetAllPVCFromLVGThinPool(lvgName, thinPoolName string) ([]*v1.Pe
     }
 
     result := make([]*v1.PersistentVolumeClaim, 0, pvcPerLVGCount)
-    thinPoolCh.(*thinPoolCache).pvcs.Range(func(pvcName, pvcCh any) bool {
+    thinPoolCh.(*thinPoolCache).pvcs.Range(func(_, pvcCh any) bool {
         result = append(result, pvcCh.(*pvcCache).pvc)
         return true
     })
@@ -650,7 +653,7 @@ func (c *Cache) RemovePVCFromTheCache(pvc *v1.PersistentVolumeClaim) {
             lvgCh, found := c.lvgs.Load(lvgName)
             if found {
                 lvgCh.(*lvgCache).thickPVCs.Delete(pvcKey.(string))
-                lvgCh.(*lvgCache).thinPools.Range(func(tpName, tpCh any) bool {
+                lvgCh.(*lvgCache).thinPools.Range(func(_, tpCh any) bool {
                     tpCh.(*thinPoolCache).pvcs.Delete(pvcKey)
                     return true
                 })
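// Reviewer note (illustrative, not part of the patch): the //nolint:gocritic directives
// added in Cache.DeleteLVG above silence a linter warning about the in-place
// element-removal idiom `s = append(s[:i], s[i+1:]...)`. A minimal standalone sketch of
// that idiom on a plain []string follows; removeAll and the demo main are hypothetical
// names, not code from this repository.

package main

import "fmt"

// removeAll deletes every occurrence of target from names in place.
func removeAll(names []string, target string) []string {
	for i := 0; i < len(names); i++ {
		if names[i] == target {
			names = append(names[:i], names[i+1:]...)
			i-- // stay on this index: it now holds the next element
		}
	}
	return names
}

func main() {
	fmt.Println(removeAll([]string{"lvg-a", "lvg-b", "lvg-a"}, "lvg-a")) // prints [lvg-b]
}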
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go
index 3af75605..c921e77f 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go
@@ -2,13 +2,14 @@ package cache
 
 import (
     "fmt"
+    "testing"
+
     snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
     "github.com/stretchr/testify/assert"
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "sds-local-volume-scheduler-extender/pkg/logger"
-    "testing"
 )
@@ -23,7 +24,7 @@ func BenchmarkCache_DeleteLVG(b *testing.B) {
         for pb.Next() {
             cache.AddLVG(lvg)
             if _, found := cache.lvgs.Load(lvg.Name); found {
-                //b.Log("lvg found, delete it")
+                // b.Log("lvg found, delete it")
                 cache.DeleteLVG(lvg.Name)
             }
         }
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go
index 1ff74047..64c1b46c 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go
@@ -4,10 +4,11 @@ import (
     "context"
     "errors"
     "fmt"
+    "reflect"
+
     snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
     v1 "k8s.io/api/core/v1"
     "k8s.io/client-go/util/workqueue"
-    "reflect"
     "sds-local-volume-scheduler-extender/pkg/cache"
     "sds-local-volume-scheduler-extender/pkg/logger"
     "sigs.k8s.io/controller-runtime/pkg/controller"
@@ -30,7 +31,7 @@ func RunLVGWatcherCacheController(
     log.Info("[RunLVGWatcherCacheController] starts the work")
 
     c, err := controller.New(LVGWatcherCacheCtrlName, mgr, controller.Options{
-        Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+        Reconciler: reconcile.Func(func(_ context.Context, _ reconcile.Request) (reconcile.Result, error) {
             return reconcile.Result{}, nil
         }),
     })
@@ -40,7 +41,7 @@ func RunLVGWatcherCacheController(
     }
 
     err = c.Watch(source.Kind(mgr.GetCache(), &snc.LvmVolumeGroup{}), handler.Funcs{
-        CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+        CreateFunc: func(_ context.Context, e event.CreateEvent, _ workqueue.RateLimitingInterface) {
             log.Info(fmt.Sprintf("[RunLVGWatcherCacheController] CreateFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.Object.GetName()))
 
             lvg, ok := e.Object.(*snc.LvmVolumeGroup)
@@ -90,7 +91,7 @@ func RunLVGWatcherCacheController(
 
             log.Info(fmt.Sprintf("[RunLVGWatcherCacheController] cache for the LVMVolumeGroup %s was reconciled by CreateFunc", lvg.Name))
         },
-        UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+        UpdateFunc: func(_ context.Context, e event.UpdateEvent, _ workqueue.RateLimitingInterface) {
             log.Info(fmt.Sprintf("[RunCacheWatcherController] UpdateFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
             oldLvg, ok := e.ObjectOld.(*snc.LvmVolumeGroup)
             if !ok {
@@ -140,7 +141,7 @@ func RunLVGWatcherCacheController(
 
             log.Debug(fmt.Sprintf("[RunLVGWatcherCacheController] Update Func ends reconciliation the LVMVolumeGroup %s cache", newLvg.Name))
         },
-        DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
+        DeleteFunc: func(_ context.Context, e event.DeleteEvent, _ workqueue.RateLimitingInterface) {
             log.Info(fmt.Sprintf("[RunCacheWatcherController] DeleteFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.Object.GetName()))
             lvg, ok := e.Object.(*snc.LvmVolumeGroup)
             if !ok {
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/controller/pvc_watcher_cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/controller/pvc_watcher_cache.go
index af1561c1..84e5b46b 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/controller/pvc_watcher_cache.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/controller/pvc_watcher_cache.go
@@ -4,6 +4,7 @@ import (
     "context"
     "errors"
     "fmt"
+
     v1 "k8s.io/api/core/v1"
     v12 "k8s.io/api/storage/v1"
     "k8s.io/client-go/util/workqueue"
@@ -33,7 +34,7 @@ func RunPVCWatcherCacheController(
     log.Info("[RunPVCWatcherCacheController] starts the work")
 
     c, err := controller.New("test-pvc-watcher", mgr, controller.Options{
-        Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+        Reconciler: reconcile.Func(func(_ context.Context, _ reconcile.Request) (reconcile.Result, error) {
             return reconcile.Result{}, nil
         }),
     })
@@ -43,7 +44,7 @@ func RunPVCWatcherCacheController(
     }
 
     err = c.Watch(source.Kind(mgr.GetCache(), &v1.PersistentVolumeClaim{}), handler.Funcs{
-        CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+        CreateFunc: func(ctx context.Context, e event.CreateEvent, _ workqueue.RateLimitingInterface) {
             log.Info("[RunPVCWatcherCacheController] CreateFunc reconciliation starts")
             pvc, ok := e.Object.(*v1.PersistentVolumeClaim)
             if !ok {
@@ -70,7 +71,7 @@ func RunPVCWatcherCacheController(
             reconcilePVC(ctx, mgr, log, schedulerCache, pvc, selectedNodeName)
             log.Info("[RunPVCWatcherCacheController] CreateFunc reconciliation ends")
         },
-        UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+        UpdateFunc: func(ctx context.Context, e event.UpdateEvent, _ workqueue.RateLimitingInterface) {
             log.Info("[RunPVCWatcherCacheController] Update Func reconciliation starts")
             pvc, ok := e.ObjectNew.(*v1.PersistentVolumeClaim)
             if !ok {
@@ -95,7 +96,7 @@ func RunPVCWatcherCacheController(
             reconcilePVC(ctx, mgr, log, schedulerCache, pvc, selectedNodeName)
             log.Info("[RunPVCWatcherCacheController] Update Func reconciliation ends")
         },
-        DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
+        DeleteFunc: func(_ context.Context, e event.DeleteEvent, _ workqueue.RateLimitingInterface) {
             log.Info("[RunPVCWatcherCacheController] Delete Func reconciliation starts")
             pvc, ok := e.Object.(*v1.PersistentVolumeClaim)
             if !ok {
@@ -140,7 +141,7 @@ func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, s
     log.Debug(fmt.Sprintf("[reconcilePVC] successfully extracted LVGs from the Storage Class %s for PVC %s/%s", sc.Name, pvc.Namespace, pvc.Name))
 
     lvgsForPVC := schedulerCache.GetLVGNamesForPVC(pvc)
-    if lvgsForPVC == nil || len(lvgsForPVC) == 0 {
+    if len(lvgsForPVC) == 0 {
         log.Debug(fmt.Sprintf("[reconcilePVC] no LVMVolumeGroups were found in the cache for PVC %s/%s. Use Storage Class %s instead", pvc.Namespace, pvc.Name, *pvc.Spec.StorageClassName))
         for _, lvg := range lvgsFromSc {
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/kubutils/kubernetes.go b/images/sds-local-volume-scheduler-extender/src/pkg/kubutils/kubernetes.go
index 4714cfe8..0e0a69d2 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/kubutils/kubernetes.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/kubutils/kubernetes.go
@@ -15,12 +15,12 @@ package kubutils
 import (
     "fmt"
+
     "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/clientcmd"
 )
 
 func KubernetesDefaultConfigCreate() (*rest.Config, error) {
-    //todo validate empty
     clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
         clientcmd.NewDefaultClientConfigLoadingRules(),
         &clientcmd.ConfigOverrides{},
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/logger/logger.go b/images/sds-local-volume-scheduler-extender/src/pkg/logger/logger.go
index 0f6bc2de..6bd12c5e 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/logger/logger.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/logger/logger.go
@@ -14,11 +14,11 @@ limitations under the License.
 package logger
 
 import (
-    "flag"
     "fmt"
+    "strconv"
+
     "github.com/go-logr/logr"
-    "k8s.io/klog/v2"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"
 )
 
 const (
@@ -47,13 +47,12 @@ type Logger struct {
 }
 
 func NewLogger(level Verbosity) (*Logger, error) {
-    klog.InitFlags(nil)
-    if err := flag.Set("v", string(level)); err != nil {
+    v, err := strconv.Atoi(string(level))
+    if err != nil {
         return nil, err
     }
-    flag.Parse()
 
-    log := klogr.New().WithCallDepth(1)
+    log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1)
     return &Logger{log: log}, nil
 }
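// Reviewer note (illustrative, not part of the patch): the logger.go change above replaces
// the flag-driven klogr constructor with k8s.io/klog/v2/textlogger, so the verbosity is
// parsed from the configured level string and fixed at construction time instead of going
// through klog.InitFlags/flag.Set. A self-contained sketch of the same pattern; newLogger
// and the demo main are hypothetical names, not code from this repository.

package main

import (
	"strconv"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2/textlogger"
)

// newLogger builds a logr.Logger whose verbosity comes from a string such as "2",
// mirroring the patched logger.NewLogger.
func newLogger(level string) (logr.Logger, error) {
	v, err := strconv.Atoi(level)
	if err != nil {
		return logr.Logger{}, err
	}
	return textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1), nil
}

func main() {
	log, err := newLogger("2")
	if err != nil {
		panic(err)
	}
	log.V(1).Info("printed: verbosity 2 covers level 1")
	log.V(4).Info("suppressed: verbosity 2 is below level 4")
}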
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter.go b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter.go
index 73139325..2447dd5b 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter.go
@@ -19,20 +19,20 @@ package scheduler
 import (
     "context"
     "encoding/json"
-    "errors"
     "fmt"
+    "net/http"
+    "sync"
+
     snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
     corev1 "k8s.io/api/core/v1"
     v1 "k8s.io/api/storage/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/utils/strings/slices"
-    "net/http"
     "sds-local-volume-scheduler-extender/pkg/cache"
     "sds-local-volume-scheduler-extender/pkg/consts"
     "sds-local-volume-scheduler-extender/pkg/logger"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/yaml"
-    "sync"
 )
 
 func (s *scheduler) filter(w http.ResponseWriter, r *http.Request) {
@@ -388,7 +388,7 @@ func filterNodes(
             // we get the specific LVG which the PVC can use on the node as we support only one specified LVG in the Storage Class on each node
             commonLVG := findMatchedLVG(lvgsFromNode, lvgsFromSC)
             if commonLVG == nil {
-                err = errors.New(fmt.Sprintf("unable to match Storage Class's LVMVolumeGroup with the node's one, Storage Class: %s, node: %s", *pvc.Spec.StorageClassName, node.Name))
+                err = fmt.Errorf("unable to match Storage Class's LVMVolumeGroup with the node's one, Storage Class: %s, node: %s", *pvc.Spec.StorageClassName, node.Name)
                 errs <- err
                 return
             }
@@ -594,9 +594,7 @@ func GetSortedLVGsFromStorageClasses(scs map[string]*v1.StorageClass) (map[strin
             return nil, err
         }
 
-        for _, lvg := range lvgs {
-            result[sc.Name] = append(result[sc.Name], lvg)
-        }
+        result[sc.Name] = append(result[sc.Name], lvgs...)
     }
 
     return result, nil
 }
@@ -660,7 +658,7 @@ func getStorageClassesUsedByPVCs(ctx context.Context, cl client.Client, pvcs map
     result := make(map[string]*v1.StorageClass, len(pvcs))
     for _, pvc := range pvcs {
         if pvc.Spec.StorageClassName == nil {
-            err = errors.New(fmt.Sprintf("not StorageClass specified for PVC %s", pvc.Name))
+            err = fmt.Errorf("not StorageClass specified for PVC %s", pvc.Name)
             return nil, err
         }
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter_test.go b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter_test.go
index f937d032..ed4289ab 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter_test.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter_test.go
@@ -1,13 +1,14 @@
 package scheduler
 
 import (
+    "testing"
+
     "github.com/stretchr/testify/assert"
     v1 "k8s.io/api/core/v1"
     v12 "k8s.io/api/storage/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "sds-local-volume-scheduler-extender/pkg/consts"
     "sds-local-volume-scheduler-extender/pkg/logger"
-    "testing"
 )
 
 func TestFilter(t *testing.T) {
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/prioritize.go b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/prioritize.go
index c4e3f4b1..044ae316 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/prioritize.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/prioritize.go
@@ -18,18 +18,17 @@ package scheduler
 import (
     "encoding/json"
-    "errors"
     "fmt"
     "math"
     "net/http"
-    "sds-local-volume-scheduler-extender/pkg/cache"
-    "sds-local-volume-scheduler-extender/pkg/consts"
-    "sds-local-volume-scheduler-extender/pkg/logger"
     "sync"
 
     corev1 "k8s.io/api/core/v1"
     v1 "k8s.io/api/storage/v1"
     "k8s.io/apimachinery/pkg/api/resource"
+    "sds-local-volume-scheduler-extender/pkg/cache"
+    "sds-local-volume-scheduler-extender/pkg/consts"
+    "sds-local-volume-scheduler-extender/pkg/logger"
 )
 
 func (s *scheduler) prioritize(w http.ResponseWriter, r *http.Request) {
@@ -148,7 +147,7 @@ func scoreNodes(
             lvgsFromSC := scLVGs[*pvc.Spec.StorageClassName]
             commonLVG := findMatchedLVG(lvgsFromNode, lvgsFromSC)
             if commonLVG == nil {
-                err = errors.New(fmt.Sprintf("unable to match Storage Class's LVMVolumeGroup with the node's one, Storage Class: %s, node: %s", *pvc.Spec.StorageClassName, node.Name))
+                err = fmt.Errorf("unable to match Storage Class's LVMVolumeGroup with the node's one, Storage Class: %s, node: %s", *pvc.Spec.StorageClassName, node.Name)
                 errs <- err
                 return
             }
@@ -172,7 +171,7 @@ func scoreNodes(
             case consts.Thin:
                 thinPool := findMatchedThinPool(lvg.Status.ThinPools, commonLVG.Thin.PoolName)
                 if thinPool == nil {
-                    err = errors.New(fmt.Sprintf("unable to match Storage Class's ThinPools with the node's one, Storage Class: %s, node: %s", *pvc.Spec.StorageClassName, node.Name))
+                    err = fmt.Errorf("unable to match Storage Class's ThinPools with the node's one, Storage Class: %s, node: %s", *pvc.Spec.StorageClassName, node.Name)
                     log.Error(err, "[scoreNodes] an error occurs while searching for target LVMVolumeGroup")
                     errs <- err
                     return
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/prioritize_test.go b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/prioritize_test.go
index 236bb977..0da1b39b 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/prioritize_test.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/prioritize_test.go
@@ -8,7 +8,7 @@ import (
 )
 
 func TestPrioritize(t *testing.T) {
-    t.Run("getFreeSpaceLeftPercent", func(t *testing.T) {
+    t.Run("getFreeSpaceLeftPercent", func(_ *testing.T) {
         requested := resource.MustParse("1Gi")
         devisor := 1.0
@@ -17,8 +17,6 @@ func TestPrioritize(t *testing.T) {
         allocated := resource.MustParse("211Gi")
         freeSize := resource.MustParse(totalSizeString)
         freeSize.Sub(allocated)
-        // t.Logf("freeSize=%s, requested=%s, totalSize=%s", freeSize.String(), requested.String(), totalSize.String())
-        // t.Logf("freeSize=%d, requested=%d, totalSize=%d", freeSize.Value(), requested.Value(), totalSize.Value())
         percent := getFreeSpaceLeftPercent(freeSize.Value(), requested.Value(), totalSize.Value())
         t.Logf("First freeSpacePercent %d", percent)
@@ -38,8 +36,4 @@ func TestPrioritize(t *testing.T) {
         rawScore2 := int(math.Round(math.Log2(float64(percent2) / devisor)))
         t.Logf("rawScore2=%d", rawScore2)
     })
-
-    t.Run("getNodeScore", func(t *testing.T) {
-
-    })
 }
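// Reviewer note (illustrative, not part of the patch): the command help in root.go earlier
// in this patch quotes the node-scoring formula min(10, max(0, log2(capacity >> 30 / divisor))),
// i.e. free capacity converted to GiB, passed through log2 and clamped to the 0..10 range.
// A rough sketch of that arithmetic; scoreFromCapacity and the demo main are hypothetical
// names, not code from this repository.

package main

import (
	"fmt"
	"math"
)

// scoreFromCapacity converts free capacity in bytes into a 0..10 score.
// capacityBytes>>30 turns bytes into whole GiB before the logarithm is taken.
func scoreFromCapacity(capacityBytes int64, divisor float64) int {
	score := math.Log2(float64(capacityBytes>>30) / divisor)
	if score < 0 {
		score = 0
	}
	if score > 10 {
		score = 10
	}
	return int(score)
}

func main() {
	// 512 GiB of free capacity with the default divisor of 1 gives log2(512) = 9.
	fmt.Println(scoreFromCapacity(512<<30, 1))
}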
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/route.go b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/route.go
index 788ebc7c..9d8815ad 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/route.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/route.go
@@ -19,8 +19,9 @@ package scheduler
 import (
     "context"
     "fmt"
-    "k8s.io/apimachinery/pkg/api/resource"
     "net/http"
+
+    "k8s.io/apimachinery/pkg/api/resource"
     "sds-local-volume-scheduler-extender/pkg/cache"
     "sds-local-volume-scheduler-extender/pkg/logger"
     "sigs.k8s.io/controller-runtime/pkg/client"
@@ -77,7 +78,7 @@ func status(w http.ResponseWriter, _ *http.Request) {
     w.WriteHeader(http.StatusOK)
     _, err := w.Write([]byte("ok"))
     if err != nil {
-        fmt.Println(fmt.Sprintf("error occurs on status route, err: %s", err.Error()))
+        fmt.Printf("error occurs on status route, err: %s\n", err.Error())
     }
 }
@@ -107,6 +108,13 @@ func (s *scheduler) getCache(w http.ResponseWriter, _ *http.Request) {
         }
 
         thickPvcs, err := s.cache.GetAllThickPVCLVG(lvg.Name)
+        if err != nil {
+            w.WriteHeader(http.StatusInternalServerError)
+            _, err = w.Write([]byte("unable to write the cache"))
+            if err != nil {
+                s.log.Error(err, "error write response")
+            }
+        }
         for _, pvc := range thickPvcs {
             _, err = w.Write([]byte(fmt.Sprintf("\t\tThick PVC: %s, reserved: %s, selected node: %s\n", pvc.Name, pvc.Spec.Resources.Requests.Storage().String(), pvc.Annotations[cache.SelectedNodeAnnotation])))
             if err != nil {