From 6737140697c23ad82d89fa9d580b54053689b21d Mon Sep 17 00:00:00 2001
From: Viktor Kramarenko
Date: Fri, 20 Sep 2024 16:14:25 +0300
Subject: [PATCH] updated to the new sds-node-configurator API version

Bumps github.com/deckhouse/sds-node-configurator/api to
v0.0.0-20240919102704-a035b4a92e77 in all modules. The new API renames
LvmVolumeGroup to LVMVolumeGroup (together with its List, Spec, Status,
Node and ThinPool types) and LVMLogicalVolumeSpec.LvmVolumeGroupName to
LVMVolumeGroupName. The LVMVolumeGroup spec no longer carries
BlockDeviceNames, so the test LVG generator drops its devices argument.

Signed-off-by: Viktor Kramarenko
---
 images/sds-local-volume-controller/src/go.mod |  2 +-
 images/sds-local-volume-controller/src/go.sum |  2 +
 .../pkg/controller/local_csi_node_watcher.go  | 16 ++--
 .../controller/local_csi_node_watcher_test.go |  8 +-
 .../local_storage_class_watcher_func.go       | 10 +--
 .../local_storage_class_watcher_test.go       | 30 +++----
 .../sds-local-volume-csi/driver/controller.go |  2 +-
 images/sds-local-volume-csi/go.mod            |  2 +-
 images/sds-local-volume-csi/go.sum            |  2 +
 images/sds-local-volume-csi/pkg/utils/func.go | 30 +++----
 .../src/go.mod                                |  2 +-
 .../src/go.sum                                |  2 +
 .../src/pkg/cache/cache.go                    | 12 +--
 .../src/pkg/cache/cache_test.go               | 88 +++++++++----------
 .../src/pkg/controller/lvg_watcher_cache.go   | 10 +--
 .../pkg/controller/lvg_watcher_cache_test.go  | 36 ++++----
 .../src/pkg/scheduler/filter.go               | 20 ++---
 images/webhooks/src/go.mod                    |  2 +-
 images/webhooks/src/go.sum                    |  2 +
 images/webhooks/src/handlers/func.go          |  1 -
 images/webhooks/src/handlers/lscValidator.go  |  3 +-
 images/webhooks/src/main.go                   |  1 -
 22 files changed, 143 insertions(+), 140 deletions(-)

diff --git a/images/sds-local-volume-controller/src/go.mod b/images/sds-local-volume-controller/src/go.mod
index 0853c644..af9d4230 100644
--- a/images/sds-local-volume-controller/src/go.mod
+++ b/images/sds-local-volume-controller/src/go.mod
@@ -4,7 +4,7 @@ go 1.22.2
 
 require (
 	github.com/deckhouse/sds-local-volume/api v0.0.0-20240816081122-3de604d3d889
-	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240816031400-b001b5ab8337
+	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77
 	github.com/go-logr/logr v1.4.2
 	github.com/onsi/ginkgo/v2 v2.20.0
 	github.com/onsi/gomega v1.34.1
diff --git a/images/sds-local-volume-controller/src/go.sum b/images/sds-local-volume-controller/src/go.sum
index 78f5fe4f..d10d4e61 100644
--- a/images/sds-local-volume-controller/src/go.sum
+++ b/images/sds-local-volume-controller/src/go.sum
@@ -10,6 +10,8 @@ github.com/deckhouse/sds-local-volume/api v0.0.0-20240816081122-3de604d3d889 h1:
 github.com/deckhouse/sds-local-volume/api v0.0.0-20240816081122-3de604d3d889/go.mod h1:cYxHYJmIl6g9lXb1etqmLeQL/vsPMgscmact/FObd+U=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240816031400-b001b5ab8337 h1:v4HuZxGfTAfqV2Mec/yUXRDO9aVP1+Uhht0+1qZeMsg=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240816031400-b001b5ab8337/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77 h1:Y3vswUk/rnCpkZzWBk+Mlr9LtMg6EI5LkQ4GvgHCslI=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
 github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
 github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
diff --git a/images/sds-local-volume-controller/src/pkg/controller/local_csi_node_watcher.go b/images/sds-local-volume-controller/src/pkg/controller/local_csi_node_watcher.go
index 9e50a08d..52df06cd 100644
--- a/images/sds-local-volume-controller/src/pkg/controller/local_csi_node_watcher.go
+++ b/images/sds-local-volume-controller/src/pkg/controller/local_csi_node_watcher.go
@@ -253,7 +253,7 @@ func addLabelOnTheLSCIfNotExist(ctx context.Context, cl client.Client, lsc slv.L
 	return true, nil
 }
 
-func addLabelOnTheLVGIfNotExist(ctx context.Context, cl client.Client, lvg snc.LvmVolumeGroup, label string) (bool, error) {
+func addLabelOnTheLVGIfNotExist(ctx context.Context, cl client.Client, lvg snc.LVMVolumeGroup, label string) (bool, error) {
 	if _, exist := lvg.Labels[label]; exist {
 		return false, nil
 	}
@@ -303,12 +303,12 @@ func clearManualEvictionLabelsIfNeeded(ctx context.Context, cl client.Client, lo
 		return err
 	}
 
-	lvgs := make(map[string]snc.LvmVolumeGroup, len(lvgList.Items))
+	lvgs := make(map[string]snc.LVMVolumeGroup, len(lvgList.Items))
 	for _, lvg := range lvgList.Items {
 		lvgs[lvg.Name] = lvg
 	}
 
-	usedLvgs := make(map[string]snc.LvmVolumeGroup, len(lvgList.Items))
+	usedLvgs := make(map[string]snc.LVMVolumeGroup, len(lvgList.Items))
 	for _, lvg := range lvgList.Items {
 		for _, n := range lvg.Status.Nodes {
 			if n.Name == node.Name {
@@ -375,13 +375,13 @@ func clearManualEvictionLabelsIfNeeded(ctx context.Context, cl client.Client, lo
 	return nil
 }
 
-func getManuallyEvictedLVGsAndLSCs(ctx context.Context, cl client.Client, node v1.Node) (map[string]snc.LvmVolumeGroup, map[string]slv.LocalStorageClass, error) {
+func getManuallyEvictedLVGsAndLSCs(ctx context.Context, cl client.Client, node v1.Node) (map[string]snc.LVMVolumeGroup, map[string]slv.LocalStorageClass, error) {
 	lvgList, err := getLVMVolumeGroups(ctx, cl)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	usedLvgs := make(map[string]snc.LvmVolumeGroup, len(lvgList.Items))
+	usedLvgs := make(map[string]snc.LVMVolumeGroup, len(lvgList.Items))
 	for _, lvg := range lvgList.Items {
 		for _, n := range lvg.Status.Nodes {
 			if n.Name == node.Name {
@@ -396,7 +396,7 @@ func getManuallyEvictedLVGsAndLSCs(ctx context.Context, cl client.Client, node v
 	}
 
 	unhealthyLscs := make(map[string]slv.LocalStorageClass, len(lscList.Items))
-	unhealthyLvgs := make(map[string]snc.LvmVolumeGroup, len(usedLvgs))
+	unhealthyLvgs := make(map[string]snc.LVMVolumeGroup, len(usedLvgs))
 
 	// This case is a base case, when the controller did not label any resource.
 	for _, lsc := range lscList.Items {
@@ -418,8 +418,8 @@ func getManuallyEvictedLVGsAndLSCs(ctx context.Context, cl client.Client, node v
 	return unhealthyLvgs, unhealthyLscs, nil
 }
 
-func getLVMVolumeGroups(ctx context.Context, cl client.Client) (*snc.LvmVolumeGroupList, error) {
-	lvgList := &snc.LvmVolumeGroupList{}
+func getLVMVolumeGroups(ctx context.Context, cl client.Client) (*snc.LVMVolumeGroupList, error) {
+	lvgList := &snc.LVMVolumeGroupList{}
 	err := cl.List(ctx, lvgList)
 
 	return lvgList, err
diff --git a/images/sds-local-volume-controller/src/pkg/controller/local_csi_node_watcher_test.go b/images/sds-local-volume-controller/src/pkg/controller/local_csi_node_watcher_test.go
index d1199137..6968c4f8 100644
--- a/images/sds-local-volume-controller/src/pkg/controller/local_csi_node_watcher_test.go
+++ b/images/sds-local-volume-controller/src/pkg/controller/local_csi_node_watcher_test.go
@@ -63,12 +63,12 @@ func TestRunLocalCSINodeWatcherController(t *testing.T) {
 		t.Error(err)
 	}
 
-	lvgOnNode4 := &snc.LvmVolumeGroup{
+	lvgOnNode4 := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "lvgOnNode4",
 		},
-		Status: snc.LvmVolumeGroupStatus{
-			Nodes: []snc.LvmVolumeGroupNode{
+		Status: snc.LVMVolumeGroupStatus{
+			Nodes: []snc.LVMVolumeGroupNode{
 				{
 					Name: "test-node4",
 				},
@@ -190,7 +190,7 @@ func TestRunLocalCSINodeWatcherController(t *testing.T) {
 	_, exist = node4.Labels[localCsiNodeSelectorLabel]
 	assert.True(t, exist)
 
-	updateLvg := &snc.LvmVolumeGroup{}
+	updateLvg := &snc.LVMVolumeGroup{}
 
 	err = cl.Get(ctx, client.ObjectKey{
 		Name: "lvgOnNode4",
diff --git a/images/sds-local-volume-controller/src/pkg/controller/local_storage_class_watcher_func.go b/images/sds-local-volume-controller/src/pkg/controller/local_storage_class_watcher_func.go
index a12d9345..0025c561 100644
--- a/images/sds-local-volume-controller/src/pkg/controller/local_storage_class_watcher_func.go
+++ b/images/sds-local-volume-controller/src/pkg/controller/local_storage_class_watcher_func.go
@@ -505,7 +505,7 @@ func validateLocalStorageClass(
 		failedMsgBuilder.WriteString(fmt.Sprintf("There already is a storage class with the same name: %s but it is not managed by the LocalStorageClass controller\n", unmanagedScName))
 	}
 
-	lvgList := &snc.LvmVolumeGroupList{}
+	lvgList := &snc.LVMVolumeGroupList{}
 	err := cl.List(ctx, lvgList)
 	if err != nil {
 		valid = false
@@ -569,8 +569,8 @@ func findAnyThinPool(lsc *slv.LocalStorageClass) []string {
 	return badLvgs
 }
 
-func findNonexistentThinPools(lvgList *snc.LvmVolumeGroupList, lsc *slv.LocalStorageClass) []string {
-	lvgs := make(map[string]snc.LvmVolumeGroup, len(lvgList.Items))
+func findNonexistentThinPools(lvgList *snc.LVMVolumeGroupList, lsc *slv.LocalStorageClass) []string {
+	lvgs := make(map[string]snc.LVMVolumeGroup, len(lvgList.Items))
 	for _, lvg := range lvgList.Items {
 		lvgs[lvg.Name] = lvg
 	}
@@ -600,7 +600,7 @@ func findNonexistentThinPools(lvgList *snc.LvmVolumeGroupList, lsc *slv.LocalSto
 	return badLvgs
 }
 
-func findNonexistentLVGs(lvgList *snc.LvmVolumeGroupList, lsc *slv.LocalStorageClass) []string {
+func findNonexistentLVGs(lvgList *snc.LVMVolumeGroupList, lsc *slv.LocalStorageClass) []string {
 	lvgs := make(map[string]struct{}, len(lvgList.Items))
 	for _, lvg := range lvgList.Items {
 		lvgs[lvg.Name] = struct{}{}
 	}
@@ -616,7 +616,7 @@ func findNonexistentLVGs(lvgList *snc.LvmVolumeGroupList, lsc *slv.LocalStorageC
 	return nonexistent
 }
 
-func findLVMVolumeGroupsOnTheSameNode(lvgList *snc.LvmVolumeGroupList, lsc *slv.LocalStorageClass) []string {
+func findLVMVolumeGroupsOnTheSameNode(lvgList *snc.LVMVolumeGroupList, lsc *slv.LocalStorageClass) []string {
 	nodesWithLVGs := make(map[string][]string, len(lsc.Spec.LVM.LVMVolumeGroups))
 	usedLVGs := make(map[string]struct{}, len(lsc.Spec.LVM.LVMVolumeGroups))
 	for _, lvg := range lsc.Spec.LVM.LVMVolumeGroups {
diff --git a/images/sds-local-volume-controller/src/pkg/controller/local_storage_class_watcher_test.go b/images/sds-local-volume-controller/src/pkg/controller/local_storage_class_watcher_test.go
index 5cf2d508..b9276099 100644
--- a/images/sds-local-volume-controller/src/pkg/controller/local_storage_class_watcher_test.go
+++ b/images/sds-local-volume-controller/src/pkg/controller/local_storage_class_watcher_test.go
@@ -37,7 +37,6 @@ import (
 
 var _ = Describe(controller.LocalStorageClassCtrlName, func() {
 	const (
-		controllerNamespace      = "test-namespace"
 		nameForLocalStorageClass = "sds-local-volume-storage-class"
 
 		existingThickLVG1Name = "test-thick-vg1"
@@ -63,13 +62,13 @@ var _ = Describe(controller.LocalStorageClassCtrlName, func() {
 		volumeBindingModeWFFC = string(v1.VolumeBindingWaitForFirstConsumer)
 		volumeBindingModeIM   = string(v1.VolumeBindingImmediate)
 
-		existingThickLVG1Template = generateLVMVolumeGroup(existingThickLVG1Name, []string{"dev-1111", "dev-2222"}, []string{})
-		existingThickLVG2Template = generateLVMVolumeGroup(existingThickLVG2Name, []string{"dev-3333", "dev-4444"}, []string{})
-		newThickLVGTemplate       = generateLVMVolumeGroup(newThickLVGName, []string{"dev-5555", "dev-6666"}, []string{})
+		existingThickLVG1Template = generateLVMVolumeGroup(existingThickLVG1Name, []string{})
+		existingThickLVG2Template = generateLVMVolumeGroup(existingThickLVG2Name, []string{})
+		newThickLVGTemplate       = generateLVMVolumeGroup(newThickLVGName, []string{})
 
-		existingThinLVG1Template = generateLVMVolumeGroup(existingThinLVG1Name, []string{"dev-7777", "dev-8888"}, []string{"thin-pool-1", "thin-pool-2"})
-		existingThinLVG2Template = generateLVMVolumeGroup(existingThinLVG2Name, []string{"dev-9999", "dev-1010"}, []string{"thin-pool-1", "thin-pool-2"})
-		newThinLVGTemplate       = generateLVMVolumeGroup(newThinLVGName, []string{"dev-1111", "dev-1212"}, []string{"thin-pool-1", "thin-pool-2"})
+		existingThinLVG1Template = generateLVMVolumeGroup(existingThinLVG1Name, []string{"thin-pool-1", "thin-pool-2"})
+		existingThinLVG2Template = generateLVMVolumeGroup(existingThinLVG2Name, []string{"thin-pool-1", "thin-pool-2"})
+		newThinLVGTemplate       = generateLVMVolumeGroup(newThinLVGName, []string{"thin-pool-1", "thin-pool-2"})
 	)
 
 	It("Create_local_sc_with_existing_lvgs", func() {
@@ -742,38 +741,37 @@ var _ = Describe(controller.LocalStorageClassCtrlName, func() {
 	})
 })
 
-func generateLVMVolumeGroup(name string, devices, thinPoolNames []string) *snc.LvmVolumeGroup {
+func generateLVMVolumeGroup(name string, thinPoolNames []string) *snc.LVMVolumeGroup {
 	lvmType := controller.LVMThickType
 	if len(thinPoolNames) > 0 {
 		lvmType = controller.LVMThinType
 	}
 
-	thinPoolsSpec := make([]snc.LvmVolumeGroupThinPoolSpec, 0)
-	thinPoolsStatus := make([]snc.LvmVolumeGroupThinPoolStatus, 0)
+	thinPoolsSpec := make([]snc.LVMVolumeGroupThinPoolSpec, 0)
+	thinPoolsStatus := make([]snc.LVMVolumeGroupThinPoolStatus, 0)
 	for i := 0; i < len(thinPoolNames); i++ {
-		thinPoolsSpec = append(thinPoolsSpec, snc.LvmVolumeGroupThinPoolSpec{
+		thinPoolsSpec = append(thinPoolsSpec, snc.LVMVolumeGroupThinPoolSpec{
 			Name: thinPoolNames[i],
 			Size: "10Gi",
 		})
 
-		thinPoolsStatus = append(thinPoolsStatus, snc.LvmVolumeGroupThinPoolStatus{
+		thinPoolsStatus = append(thinPoolsStatus, snc.LVMVolumeGroupThinPoolStatus{
 			Name:       thinPoolNames[i],
 			ActualSize: resource.MustParse("10Gi"),
 			UsedSize:   resource.MustParse("0Gi"),
 		})
 	}
 
-	return &snc.LvmVolumeGroup{
+	return &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
-		Spec: snc.LvmVolumeGroupSpec{
+		Spec: snc.LVMVolumeGroupSpec{
 			ActualVGNameOnTheNode: "vg1",
-			BlockDeviceNames:      devices,
 			ThinPools:             thinPoolsSpec,
 			Type:                  lvmType,
 		},
-		Status: snc.LvmVolumeGroupStatus{
+		Status: snc.LVMVolumeGroupStatus{
 			ThinPools: thinPoolsStatus,
 		},
 	}
diff --git a/images/sds-local-volume-csi/driver/controller.go b/images/sds-local-volume-csi/driver/controller.go
index 12520d00..68dbdd63 100644
--- a/images/sds-local-volume-csi/driver/controller.go
+++ b/images/sds-local-volume-csi/driver/controller.go
@@ -321,7 +321,7 @@ func (d *Driver) ControllerExpandVolume(ctx context.Context, request *csi.Contro
 		}, nil
 	}
 
-	lvg, err := utils.GetLVMVolumeGroup(ctx, d.cl, llv.Spec.LvmVolumeGroupName, llv.Namespace)
+	lvg, err := utils.GetLVMVolumeGroup(ctx, d.cl, llv.Spec.LVMVolumeGroupName, llv.Namespace)
 	if err != nil {
 		d.log.Error(err, fmt.Sprintf("[ControllerExpandVolume][traceID:%s][volumeID:%s] error getting LVMVolumeGroup", traceID, volumeID))
 		return nil, status.Errorf(codes.Internal, "error getting LVMVolumeGroup: %v", err)
diff --git a/images/sds-local-volume-csi/go.mod b/images/sds-local-volume-csi/go.mod
index e9e1ea2a..91b2d9ea 100644
--- a/images/sds-local-volume-csi/go.mod
+++ b/images/sds-local-volume-csi/go.mod
@@ -5,7 +5,7 @@ go 1.22.3
 require (
 	github.com/container-storage-interface/spec v1.10.0
 	github.com/deckhouse/sds-local-volume/api v0.0.0-20240903071950-ed9d3bba999b
-	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240902155711-f525d7d805fa
+	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77
 	github.com/go-logr/logr v1.4.2
 	github.com/golang/protobuf v1.5.4
 	github.com/google/uuid v1.6.0
diff --git a/images/sds-local-volume-csi/go.sum b/images/sds-local-volume-csi/go.sum
index ccf8a73a..001ab376 100644
--- a/images/sds-local-volume-csi/go.sum
+++ b/images/sds-local-volume-csi/go.sum
@@ -10,6 +10,8 @@ github.com/deckhouse/sds-local-volume/api v0.0.0-20240903071950-ed9d3bba999b h1:
 github.com/deckhouse/sds-local-volume/api v0.0.0-20240903071950-ed9d3bba999b/go.mod h1:cYxHYJmIl6g9lXb1etqmLeQL/vsPMgscmact/FObd+U=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240902155711-f525d7d805fa h1:VaqIf8flnAgl+MY59EYq5zUwRwaSfoLvxTZ4NfXxtgE=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240902155711-f525d7d805fa/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77 h1:Y3vswUk/rnCpkZzWBk+Mlr9LtMg6EI5LkQ4GvgHCslI=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
 github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
 github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
diff --git a/images/sds-local-volume-csi/pkg/utils/func.go b/images/sds-local-volume-csi/pkg/utils/func.go
index 121b6c36..eece3acd 100644
--- a/images/sds-local-volume-csi/pkg/utils/func.go
+++ b/images/sds-local-volume-csi/pkg/utils/func.go
@@ -154,7 +154,7 @@ func AreSizesEqualWithinDelta(leftSize, rightSize, allowedDelta resource.Quantit
 	return math.Abs(leftSizeFloat-rightSizeFloat) < float64(allowedDelta.Value())
 }
 
-func GetNodeWithMaxFreeSpace(lvgs []snc.LvmVolumeGroup, storageClassLVGParametersMap map[string]string, lvmType string) (nodeName string, freeSpace resource.Quantity, err error) {
+func GetNodeWithMaxFreeSpace(lvgs []snc.LVMVolumeGroup, storageClassLVGParametersMap map[string]string, lvmType string) (nodeName string, freeSpace resource.Quantity, err error) {
 	var maxFreeSpace int64
 	for _, lvg := range lvgs {
 		switch lvmType {
@@ -182,9 +182,9 @@ func GetNodeWithMaxFreeSpace(lvgs []snc.LvmVolumeGroup, storageClassLVGParameter
 
 // TODO: delete the method below?
 func GetLVMVolumeGroupParams(ctx context.Context, kc client.Client, log logger.Logger, lvmVG map[string]string, nodeName, lvmType string) (lvgName, vgName string, err error) {
-	listLvgs := &snc.LvmVolumeGroupList{
+	listLvgs := &snc.LVMVolumeGroupList{
 		ListMeta: metav1.ListMeta{},
-		Items:    []snc.LvmVolumeGroup{},
+		Items:    []snc.LVMVolumeGroup{},
 	}
 
 	err = kc.List(ctx, listLvgs)
@@ -218,8 +218,8 @@ func GetLVMVolumeGroupParams(ctx context.Context, kc client.Client, log logger.L
 	return "", "", errors.New("there are no matches")
 }
 
-func GetLVMVolumeGroup(ctx context.Context, kc client.Client, lvgName, namespace string) (*snc.LvmVolumeGroup, error) {
-	var lvg snc.LvmVolumeGroup
+func GetLVMVolumeGroup(ctx context.Context, kc client.Client, lvgName, namespace string) (*snc.LVMVolumeGroup, error) {
+	var lvg snc.LVMVolumeGroup
 
 	err := kc.Get(ctx, client.ObjectKey{
 		Name: lvgName,
@@ -229,14 +229,14 @@ func GetLVMVolumeGroup(ctx context.Context, kc client.Client, lvgName, namespace
 	return &lvg, err
 }
 
-func GetLVMVolumeGroupFreeSpace(lvg snc.LvmVolumeGroup) (vgFreeSpace resource.Quantity) {
+func GetLVMVolumeGroupFreeSpace(lvg snc.LVMVolumeGroup) (vgFreeSpace resource.Quantity) {
 	vgFreeSpace = lvg.Status.VGSize
 	vgFreeSpace.Sub(lvg.Status.AllocatedSize)
 	return vgFreeSpace
 }
 
-func GetLVMThinPoolFreeSpace(lvg snc.LvmVolumeGroup, thinPoolName string) (thinPoolFreeSpace resource.Quantity, err error) {
-	var storagePoolThinPool *snc.LvmVolumeGroupThinPoolStatus
+func GetLVMThinPoolFreeSpace(lvg snc.LVMVolumeGroup, thinPoolName string) (thinPoolFreeSpace resource.Quantity, err error) {
+	var storagePoolThinPool *snc.LVMVolumeGroupThinPoolStatus
 	for _, thinPool := range lvg.Status.ThinPools {
 		if thinPool.Name == thinPoolName {
 			storagePoolThinPool = &thinPool
@@ -256,7 +256,7 @@ func ExpandLVMLogicalVolume(ctx context.Context, kc client.Client, llv *snc.LVML
 	return kc.Update(ctx, llv)
 }
 
-func GetStorageClassLVGsAndParameters(ctx context.Context, kc client.Client, log *logger.Logger, storageClassLVGParametersString string) (storageClassLVGs []snc.LvmVolumeGroup, storageClassLVGParametersMap map[string]string, err error) {
+func GetStorageClassLVGsAndParameters(ctx context.Context, kc client.Client, log *logger.Logger, storageClassLVGParametersString string) (storageClassLVGs []snc.LVMVolumeGroup, storageClassLVGParametersMap map[string]string, err error) {
 	var storageClassLVGParametersList LVMVolumeGroups
 	err = yaml.Unmarshal([]byte(storageClassLVGParametersString), &storageClassLVGParametersList)
 	if err != nil {
@@ -291,17 +291,17 @@ func GetStorageClassLVGsAndParameters(ctx context.Context, kc client.Client, log
 	return storageClassLVGs, storageClassLVGParametersMap, nil
 }
 
-func GetLVGList(ctx context.Context, kc client.Client) (*snc.LvmVolumeGroupList, error) {
-	listLvgs := &snc.LvmVolumeGroupList{}
+func GetLVGList(ctx context.Context, kc client.Client) (*snc.LVMVolumeGroupList, error) {
+	listLvgs := &snc.LVMVolumeGroupList{}
 	return listLvgs, kc.List(ctx, listLvgs)
 }
 
-func GetLLVSpec(log *logger.Logger, lvName string, selectedLVG snc.LvmVolumeGroup, storageClassLVGParametersMap map[string]string, lvmType string, llvSize resource.Quantity, contiguous bool) snc.LVMLogicalVolumeSpec {
+func GetLLVSpec(log *logger.Logger, lvName string, selectedLVG snc.LVMVolumeGroup, storageClassLVGParametersMap map[string]string, lvmType string, llvSize resource.Quantity, contiguous bool) snc.LVMLogicalVolumeSpec {
 	lvmLogicalVolumeSpec := snc.LVMLogicalVolumeSpec{
 		ActualLVNameOnTheNode: lvName,
 		Type:                  lvmType,
 		Size:                  llvSize.String(),
-		LvmVolumeGroupName:    selectedLVG.Name,
+		LVMVolumeGroupName:    selectedLVG.Name,
 	}
 
 	switch lvmType {
@@ -323,13 +323,13 @@ func GetLLVSpec(log *logger.Logger, lvName string, selectedLVG snc.LvmVolumeGrou
 	return lvmLogicalVolumeSpec
 }
 
-func SelectLVG(storageClassLVGs []snc.LvmVolumeGroup, nodeName string) (snc.LvmVolumeGroup, error) {
+func SelectLVG(storageClassLVGs []snc.LVMVolumeGroup, nodeName string) (snc.LVMVolumeGroup, error) {
 	for _, lvg := range storageClassLVGs {
 		if lvg.Status.Nodes[0].Name == nodeName {
 			return lvg, nil
 		}
 	}
-	return snc.LvmVolumeGroup{}, fmt.Errorf("[SelectLVG] no LVMVolumeGroup found for node %s", nodeName)
+	return snc.LVMVolumeGroup{}, fmt.Errorf("[SelectLVG] no LVMVolumeGroup found for node %s", nodeName)
 }
 
 func removeLLVFinalizerIfExist(ctx context.Context, kc client.Client, log *logger.Logger, llv *snc.LVMLogicalVolume, finalizer string) (bool, error) {
diff --git a/images/sds-local-volume-scheduler-extender/src/go.mod b/images/sds-local-volume-scheduler-extender/src/go.mod
index 8837b7e1..2b9a89c9 100644
--- a/images/sds-local-volume-scheduler-extender/src/go.mod
+++ b/images/sds-local-volume-scheduler-extender/src/go.mod
@@ -4,7 +4,7 @@ go 1.22.2
 
 require (
 	github.com/deckhouse/sds-local-volume/api v0.0.0-20240903071950-ed9d3bba999b
-	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240902155711-f525d7d805fa
+	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77
 	github.com/go-logr/logr v1.4.2
 	github.com/go-logr/zapr v1.3.0
 	github.com/spf13/cobra v1.8.1
diff --git a/images/sds-local-volume-scheduler-extender/src/go.sum b/images/sds-local-volume-scheduler-extender/src/go.sum
index 9934acdd..4ee9c2da 100644
--- a/images/sds-local-volume-scheduler-extender/src/go.sum
+++ b/images/sds-local-volume-scheduler-extender/src/go.sum
@@ -11,6 +11,8 @@ github.com/deckhouse/sds-local-volume/api v0.0.0-20240903071950-ed9d3bba999b h1:
 github.com/deckhouse/sds-local-volume/api v0.0.0-20240903071950-ed9d3bba999b/go.mod h1:cYxHYJmIl6g9lXb1etqmLeQL/vsPMgscmact/FObd+U=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240902155711-f525d7d805fa h1:VaqIf8flnAgl+MY59EYq5zUwRwaSfoLvxTZ4NfXxtgE=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240902155711-f525d7d805fa/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77 h1:Y3vswUk/rnCpkZzWBk+Mlr9LtMg6EI5LkQ4GvgHCslI=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
 github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
 github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go
index 6a635e21..16954fdb 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache.go
@@ -28,7 +28,7 @@ type Cache struct {
 }
 
 type lvgCache struct {
-	lvg       *snc.LvmVolumeGroup
+	lvg       *snc.LVMVolumeGroup
 	thickPVCs sync.Map // map[string]*pvcCache
 	thinPools sync.Map // map[string]*thinPoolCache
 }
@@ -50,7 +50,7 @@ func NewCache(logger logger.Logger) *Cache {
 }
 
 // AddLVG adds selected LVMVolumeGroup resource to the cache. If it is already stored, does nothing.
-func (c *Cache) AddLVG(lvg *snc.LvmVolumeGroup) {
+func (c *Cache) AddLVG(lvg *snc.LVMVolumeGroup) {
 	_, loaded := c.lvgs.LoadOrStore(lvg.Name, &lvgCache{
 		lvg:       lvg,
 		thickPVCs: sync.Map{},
@@ -75,7 +75,7 @@ func (c *Cache) AddLVG(lvg *snc.LvmVolumeGroup) {
 }
 
 // UpdateLVG updated selected LVMVolumeGroup resource in the cache. If such LVMVolumeGroup is not stored, returns an error.
-func (c *Cache) UpdateLVG(lvg *snc.LvmVolumeGroup) error {
+func (c *Cache) UpdateLVG(lvg *snc.LVMVolumeGroup) error {
 	if lvgCh, found := c.lvgs.Load(lvg.Name); found {
 		lvgCh.(*lvgCache).lvg = lvg
 
@@ -102,7 +102,7 @@ func (c *Cache) UpdateLVG(lvg *snc.LvmVolumeGroup) error {
 }
 
 // TryGetLVG returns selected LVMVolumeGroup resource if it is stored in the cache, otherwise returns nil.
-func (c *Cache) TryGetLVG(name string) *snc.LvmVolumeGroup {
+func (c *Cache) TryGetLVG(name string) *snc.LVMVolumeGroup {
 	lvgCh, found := c.lvgs.Load(name)
 	if !found {
 		c.log.Debug(fmt.Sprintf("[TryGetLVG] the LVMVolumeGroup %s was not found in the cache. Return nil", name))
@@ -124,8 +124,8 @@ func (c *Cache) GetLVGNamesByNodeName(nodeName string) []string {
 }
 
 // GetAllLVG returns all the LVMVolumeGroups resources stored in the cache.
-func (c *Cache) GetAllLVG() map[string]*snc.LvmVolumeGroup {
-	lvgs := make(map[string]*snc.LvmVolumeGroup)
+func (c *Cache) GetAllLVG() map[string]*snc.LVMVolumeGroup {
+	lvgs := make(map[string]*snc.LVMVolumeGroup)
 	c.lvgs.Range(func(lvgName, lvgCh any) bool {
 		if lvgCh.(*lvgCache).lvg == nil {
 			c.log.Error(fmt.Errorf("LVMVolumeGroup %s is not initialized", lvgName), "[GetAllLVG] an error occurs while iterating the LVMVolumeGroups")
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go
index 9a472ccb..e3586bc4 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/cache/cache_test.go
@@ -15,7 +15,7 @@ import (
 
 func BenchmarkCache_DeleteLVG(b *testing.B) {
 	cache := NewCache(logger.Logger{})
-	lvg := &snc.LvmVolumeGroup{
+	lvg := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "first",
 		},
@@ -34,7 +34,7 @@ func BenchmarkCache_DeleteLVG(b *testing.B) {
 
 func BenchmarkCache_GetLVGReservedSpace(b *testing.B) {
 	cache := NewCache(logger.Logger{})
-	lvg := &snc.LvmVolumeGroup{
+	lvg := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "first",
 		},
@@ -101,32 +101,32 @@ func BenchmarkCache_GetLVGReservedSpace(b *testing.B) {
 
 func BenchmarkCache_AddPVC(b *testing.B) {
 	cache := NewCache(logger.Logger{})
-	lvg1 := &snc.LvmVolumeGroup{
+	lvg1 := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "first",
 		},
-		Status: snc.LvmVolumeGroupStatus{
-			Nodes: []snc.LvmVolumeGroupNode{
+		Status: snc.LVMVolumeGroupStatus{
+			Nodes: []snc.LVMVolumeGroupNode{
 				{Name: "test-node1"},
 			},
 		},
 	}
-	lvg2 := &snc.LvmVolumeGroup{
+	lvg2 := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "second",
 		},
-		Status: snc.LvmVolumeGroupStatus{
-			Nodes: []snc.LvmVolumeGroupNode{
+		Status: snc.LVMVolumeGroupStatus{
+			Nodes: []snc.LVMVolumeGroupNode{
 				{Name: "test-node2"},
 			},
 		},
 	}
-	lvg3 := &snc.LvmVolumeGroup{
+	lvg3 := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "third",
 		},
-		Status: snc.LvmVolumeGroupStatus{
-			Nodes: []snc.LvmVolumeGroupNode{
+		Status: snc.LVMVolumeGroupStatus{
+			Nodes: []snc.LVMVolumeGroupNode{
 				{Name: "test-node3"},
 			},
 		},
@@ -170,14 +170,14 @@ func BenchmarkCache_GetAllLVG(b *testing.B) {
 	cache := NewCache(logger.Logger{})
 	lvgs := map[string]*lvgCache{
 		"first": {
-			lvg: &snc.LvmVolumeGroup{
+			lvg: &snc.LVMVolumeGroup{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "first",
 				},
 			},
 		},
 		"second": {
-			lvg: &snc.LvmVolumeGroup{
+			lvg: &snc.LVMVolumeGroup{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "second",
 				},
@@ -225,7 +225,7 @@ func BenchmarkCache_TryGetLVG(b *testing.B) {
 	cache := NewCache(logger.Logger{})
 	name := "test-name"
 
-	lvg := &snc.LvmVolumeGroup{
+	lvg := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
@@ -249,12 +249,12 @@ func BenchmarkCache_AddLVG(b *testing.B) {
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
 			i++
-			lvg1 := &snc.LvmVolumeGroup{
+			lvg1 := &snc.LVMVolumeGroup{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: fmt.Sprintf("test-lvg-%d", i),
 				},
-				Status: snc.LvmVolumeGroupStatus{
-					Nodes: []snc.LvmVolumeGroupNode{
+				Status: snc.LVMVolumeGroupStatus{
+					Nodes: []snc.LVMVolumeGroupNode{
 						{
 							Name: "test-1",
 						},
@@ -262,12 +262,12 @@
 				},
 			}
 
-			lvg2 := &snc.LvmVolumeGroup{
+			lvg2 := &snc.LVMVolumeGroup{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: fmt.Sprintf("test-lvg-%d", i+1),
 				},
-				Status: snc.LvmVolumeGroupStatus{
-					Nodes: []snc.LvmVolumeGroupNode{
+				Status: snc.LVMVolumeGroupStatus{
+					Nodes: []snc.LVMVolumeGroupNode{
 						{
 							Name: "test-1",
 						},
@@ -275,12 +275,12 @@
 				},
 			}
 
-			lvg3 := &snc.LvmVolumeGroup{
+			lvg3 := &snc.LVMVolumeGroup{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: fmt.Sprintf("test-lvg-%d", i+2),
 				},
-				Status: snc.LvmVolumeGroupStatus{
-					Nodes: []snc.LvmVolumeGroupNode{
+				Status: snc.LVMVolumeGroupStatus{
+					Nodes: []snc.LVMVolumeGroupNode{
 						{
 							Name: "test-1",
 						},
@@ -301,21 +301,21 @@
 func TestCache_UpdateLVG(t *testing.T) {
 	cache := NewCache(logger.Logger{})
 	name := "test-lvg"
-	lvg := &snc.LvmVolumeGroup{
+	lvg := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
-		Status: snc.LvmVolumeGroupStatus{
+		Status: snc.LVMVolumeGroupStatus{
 			AllocatedSize: resource.MustParse("1Gi"),
 		},
 	}
 
 	cache.AddLVG(lvg)
 
-	newLVG := &snc.LvmVolumeGroup{
+	newLVG := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
-		Status: snc.LvmVolumeGroupStatus{
+		Status: snc.LVMVolumeGroupStatus{
 			AllocatedSize: resource.MustParse("2Gi"),
 		},
 	}
@@ -334,7 +334,7 @@ func BenchmarkCache_UpdateLVG(b *testing.B) {
 	name := "test-name"
 	i := 0
 
-	lvg := &snc.LvmVolumeGroup{
+	lvg := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
@@ -349,11 +349,11 @@ func BenchmarkCache_UpdateLVG(b *testing.B) {
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
 			i++
-			updated := &snc.LvmVolumeGroup{
+			updated := &snc.LVMVolumeGroup{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: name,
 				},
-				Status: snc.LvmVolumeGroupStatus{
+				Status: snc.LVMVolumeGroupStatus{
 					AllocatedSize: resource.MustParse(fmt.Sprintf("2%dGi", i)),
 				},
 			}
@@ -369,12 +369,12 @@
 func BenchmarkCache_UpdatePVC(b *testing.B) {
 	cache := NewCache(logger.Logger{})
 	i := 0
-	lvg := &snc.LvmVolumeGroup{
+	lvg := &snc.LVMVolumeGroup{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-lvg",
 		},
-		Status: snc.LvmVolumeGroupStatus{
-			Nodes: []snc.LvmVolumeGroupNode{
+		Status: snc.LVMVolumeGroupStatus{
+			Nodes: []snc.LVMVolumeGroupNode{
 				{
 					Name: "test-node",
 				},
@@ -426,13 +426,13 @@
 		for pb.Next() {
 			i++
 
-			lvgs := []*snc.LvmVolumeGroup{
+			lvgs := []*snc.LVMVolumeGroup{
 				{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: fmt.Sprintf("test-lvg-%d", i),
 					},
-					Status: snc.LvmVolumeGroupStatus{
-						Nodes: []snc.LvmVolumeGroupNode{
+					Status: snc.LVMVolumeGroupStatus{
+						Nodes: []snc.LVMVolumeGroupNode{
 							{
 								Name: nodeName,
 							},
@@ -444,8 +444,8 @@
 					ObjectMeta: metav1.ObjectMeta{
 						Name: fmt.Sprintf("test-lvg-%d", i+1),
 					},
-					Status: snc.LvmVolumeGroupStatus{
-						Nodes: []snc.LvmVolumeGroupNode{
+					Status: snc.LVMVolumeGroupStatus{
+						Nodes: []snc.LVMVolumeGroupNode{
 							{
 								Name: nodeName,
 							},
@@ -457,8 +457,8 @@
 					ObjectMeta: metav1.ObjectMeta{
 						Name: fmt.Sprintf("test-lvg-%d", i+2),
 					},
-					Status: snc.LvmVolumeGroupStatus{
-						Nodes: []snc.LvmVolumeGroupNode{
+					Status: snc.LVMVolumeGroupStatus{
+						Nodes: []snc.LVMVolumeGroupNode{
 							{
 								Name: nodeName,
 							},
@@ -501,12 +501,12 @@
 				}
 			}
 
-			updatedLvgs := []*snc.LvmVolumeGroup{
+			updatedLvgs := []*snc.LVMVolumeGroup{
 				{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: fmt.Sprintf("test-lvg-%d", i),
 					},
-					Status: snc.LvmVolumeGroupStatus{
+					Status: snc.LVMVolumeGroupStatus{
 						AllocatedSize: resource.MustParse(fmt.Sprintf("1%dGi", i+1)),
 					},
 				},
@@ -514,7 +514,7 @@
 					ObjectMeta: metav1.ObjectMeta{
 						Name: fmt.Sprintf("test-lvg-%d", i+1),
 					},
-					Status: snc.LvmVolumeGroupStatus{
+					Status: snc.LVMVolumeGroupStatus{
 						AllocatedSize: resource.MustParse(fmt.Sprintf("1%dGi", i+1)),
 					},
 				},
@@ -522,7 +522,7 @@
 					ObjectMeta: metav1.ObjectMeta{
 						Name: fmt.Sprintf("test-lvg-%d", i+2),
 					},
-					Status: snc.LvmVolumeGroupStatus{
+					Status: snc.LVMVolumeGroupStatus{
 						AllocatedSize: resource.MustParse(fmt.Sprintf("1%dGi", i+1)),
 					},
 				},
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go
index bd3c11c8..7c6e4ea3 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache.go
@@ -40,8 +40,8 @@ func RunLVGWatcherCacheController(
 		return nil, err
 	}
 
-	err = c.Watch(source.Kind(mgr.GetCache(), &snc.LvmVolumeGroup{}, handler.TypedFuncs[*snc.LvmVolumeGroup, reconcile.Request]{
-		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*snc.LvmVolumeGroup], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+	err = c.Watch(source.Kind(mgr.GetCache(), &snc.LVMVolumeGroup{}, handler.TypedFuncs[*snc.LVMVolumeGroup, reconcile.Request]{
+		CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*snc.LVMVolumeGroup], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunLVGWatcherCacheController] CreateFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.Object.GetName()))
 			lvg := e.Object
 
@@ -86,7 +86,7 @@ func RunLVGWatcherCacheController(
 			log.Info(fmt.Sprintf("[RunLVGWatcherCacheController] cache for the LVMVolumeGroup %s was reconciled by CreateFunc", lvg.Name))
 		},
 
-		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*snc.LvmVolumeGroup], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+		UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*snc.LVMVolumeGroup], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunCacheWatcherController] UpdateFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.ObjectNew.GetName()))
 			oldLvg := e.ObjectOld
 			newLvg := e.ObjectNew
@@ -126,7 +126,7 @@ func RunLVGWatcherCacheController(
 			log.Debug(fmt.Sprintf("[RunLVGWatcherCacheController] Update Func ends reconciliation the LVMVolumeGroup %s cache", newLvg.Name))
 		},
 
-		DeleteFunc: func(_ context.Context, e event.TypedDeleteEvent[*snc.LvmVolumeGroup], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
+		DeleteFunc: func(_ context.Context, e event.TypedDeleteEvent[*snc.LVMVolumeGroup], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			log.Info(fmt.Sprintf("[RunCacheWatcherController] DeleteFunc starts the cache reconciliation for the LVMVolumeGroup %s", e.Object.GetName()))
 			lvg := e.Object
 			cache.DeleteLVG(lvg.Name)
@@ -143,7 +143,7 @@ func RunLVGWatcherCacheController(
 	return c, nil
 }
 
-func shouldReconcileLVG(oldLVG, newLVG *snc.LvmVolumeGroup) bool {
+func shouldReconcileLVG(oldLVG, newLVG *snc.LVMVolumeGroup) bool {
 	if newLVG.DeletionTimestamp != nil {
 		return false
 	}
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache_test.go b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache_test.go
index e5c75517..23bd9e3d 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache_test.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/controller/lvg_watcher_cache_test.go
@@ -12,34 +12,34 @@ import (
 
 func TestLVGWatcherCache(t *testing.T) {
 	t.Run("shouldReconcileLVG", func(t *testing.T) {
 		t.Run("deletion_timestamp_not_nil_returns_false", func(t *testing.T) {
-			lvg := &snc.LvmVolumeGroup{}
+			lvg := &snc.LVMVolumeGroup{}
 			lvg.DeletionTimestamp = &v1.Time{}
-			assert.False(t, shouldReconcileLVG(&snc.LvmVolumeGroup{}, lvg))
+			assert.False(t, shouldReconcileLVG(&snc.LVMVolumeGroup{}, lvg))
 		})
 
 		t.Run("allocated_size_and_status_thin_pools_equal_returns_false", func(t *testing.T) {
 			size := resource.MustParse("1G")
-			thinPools := []snc.LvmVolumeGroupThinPoolStatus{
+			thinPools := []snc.LVMVolumeGroupThinPoolStatus{
 				{
 					Name:       "thin",
 					ActualSize: resource.MustParse("1G"),
 				},
 			}
-			oldLvg := &snc.LvmVolumeGroup{
+			oldLvg := &snc.LVMVolumeGroup{
 				ObjectMeta: v1.ObjectMeta{
 					Name: "first",
 				},
-				Status: snc.LvmVolumeGroupStatus{
+				Status: snc.LVMVolumeGroupStatus{
 					AllocatedSize: size,
 					ThinPools:     thinPools,
 				},
 			}
-			newLvg := &snc.LvmVolumeGroup{
+			newLvg := &snc.LVMVolumeGroup{
 				ObjectMeta: v1.ObjectMeta{
 					Name: "first",
 				},
-				Status: snc.LvmVolumeGroupStatus{
+				Status: snc.LVMVolumeGroupStatus{
 					AllocatedSize: size,
 					ThinPools:     thinPools,
 				},
@@ -49,26 +49,26 @@ func TestLVGWatcherCache(t *testing.T) {
 		})
 
 		t.Run("allocated_size_not_equal_returns_true", func(t *testing.T) {
-			thinPools := []snc.LvmVolumeGroupThinPoolStatus{
+			thinPools := []snc.LVMVolumeGroupThinPoolStatus{
 				{
 					Name:       "thin",
 					ActualSize: resource.MustParse("1G"),
 				},
 			}
-			oldLvg := &snc.LvmVolumeGroup{
+			oldLvg := &snc.LVMVolumeGroup{
 				ObjectMeta: v1.ObjectMeta{
 					Name: "first",
 				},
-				Status: snc.LvmVolumeGroupStatus{
+				Status: snc.LVMVolumeGroupStatus{
 					AllocatedSize: resource.MustParse("1G"),
 					ThinPools:     thinPools,
 				},
 			}
-			newLvg := &snc.LvmVolumeGroup{
+			newLvg := &snc.LVMVolumeGroup{
 				ObjectMeta: v1.ObjectMeta{
 					Name: "first",
 				},
-				Status: snc.LvmVolumeGroupStatus{
+				Status: snc.LVMVolumeGroupStatus{
 					AllocatedSize: resource.MustParse("2G"),
 					ThinPools:     thinPools,
 				},
@@ -79,13 +79,13 @@ func TestLVGWatcherCache(t *testing.T) {
 
 		t.Run("status_thin_pools_not_equal_returns_false", func(t *testing.T) {
 			size := resource.MustParse("1G")
-			oldLvg := &snc.LvmVolumeGroup{
+			oldLvg := &snc.LVMVolumeGroup{
 				ObjectMeta: v1.ObjectMeta{
 					Name: "first",
 				},
-				Status: snc.LvmVolumeGroupStatus{
+				Status: snc.LVMVolumeGroupStatus{
 					AllocatedSize: size,
-					ThinPools: []snc.LvmVolumeGroupThinPoolStatus{
+					ThinPools: []snc.LVMVolumeGroupThinPoolStatus{
 						{
 							Name:       "thin",
 							ActualSize: resource.MustParse("1G"),
@@ -93,13 +93,13 @@ func TestLVGWatcherCache(t *testing.T) {
 					},
 				},
 			}
-			newLvg := &snc.LvmVolumeGroup{
+			newLvg := &snc.LVMVolumeGroup{
 				ObjectMeta: v1.ObjectMeta{
 					Name: "first",
 				},
-				Status: snc.LvmVolumeGroupStatus{
+				Status: snc.LVMVolumeGroupStatus{
 					AllocatedSize: size,
-					ThinPools: []snc.LvmVolumeGroupThinPoolStatus{
+					ThinPools: []snc.LVMVolumeGroupThinPoolStatus{
 						{
 							Name:       "thin",
 							ActualSize: resource.MustParse("2G"),
diff --git a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter.go b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter.go
index 5ca4d86f..6722424b 100644
--- a/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter.go
+++ b/images/sds-local-volume-scheduler-extender/src/pkg/scheduler/filter.go
@@ -516,7 +516,7 @@ func filterNodes(
 	return result, nil
 }
 
-func getLVGThinFreeSpaces(lvgs map[string]*snc.LvmVolumeGroup) map[string]map[string]int64 {
+func getLVGThinFreeSpaces(lvgs map[string]*snc.LVMVolumeGroup) map[string]map[string]int64 {
 	result := make(map[string]map[string]int64, len(lvgs))
 
 	for _, lvg := range lvgs {
@@ -532,7 +532,7 @@ func getLVGThinFreeSpaces(lvgs map[string]*snc.LvmVolumeGroup) map[string]map[st
 	return result
 }
 
-func getLVGThickFreeSpaces(lvgs map[string]*snc.LvmVolumeGroup) map[string]int64 {
+func getLVGThickFreeSpaces(lvgs map[string]*snc.LVMVolumeGroup) map[string]int64 {
 	result := make(map[string]int64, len(lvgs))
 
 	for _, lvg := range lvgs {
@@ -542,7 +542,7 @@ func getLVGThickFreeSpaces(lvgs map[string]*snc.LvmVolumeGroup) map[string]int64
 	return result
 }
 
-func findMatchedThinPool(thinPools []snc.LvmVolumeGroupThinPoolStatus, name string) *snc.LvmVolumeGroupThinPoolStatus {
+func findMatchedThinPool(thinPools []snc.LVMVolumeGroupThinPoolStatus, name string) *snc.LVMVolumeGroupThinPoolStatus {
 	for _, tp := range thinPools {
 		if tp.Name == name {
 			return &tp
@@ -552,7 +552,7 @@ func findMatchedThinPool(thinPools []snc.LvmVolumeGroupThinPoolStatus, name stri
 	return nil
 }
 
-func findMatchedLVG(nodeLVGs []*snc.LvmVolumeGroup, scLVGs LVMVolumeGroups) *LVMVolumeGroup {
+func findMatchedLVG(nodeLVGs []*snc.LVMVolumeGroup, scLVGs LVMVolumeGroups) *LVMVolumeGroup {
 	nodeLVGNames := make(map[string]struct{}, len(nodeLVGs))
 	for _, lvg := range nodeLVGs {
 		nodeLVGNames[lvg.Name] = struct{}{}
 	}
@@ -567,8 +567,8 @@ func findMatchedLVG(nodeLVGs []*snc.LvmVolumeGroup, scLVGs LVMVolumeGroups) *LVM
 	return nil
 }
 
-func getCommonNodesByStorageClasses(scs map[string]*v1.StorageClass, nodesWithLVGs map[string][]*snc.LvmVolumeGroup) (map[string][]*snc.LvmVolumeGroup, error) {
-	result := make(map[string][]*snc.LvmVolumeGroup, len(nodesWithLVGs))
+func getCommonNodesByStorageClasses(scs map[string]*v1.StorageClass, nodesWithLVGs map[string][]*snc.LVMVolumeGroup) (map[string][]*snc.LVMVolumeGroup, error) {
+	result := make(map[string][]*snc.LVMVolumeGroup, len(nodesWithLVGs))
 
 	for nodeName, lvgs := range nodesWithLVGs {
 		lvgNames := make(map[string]struct{}, len(lvgs))
@@ -605,8 +605,8 @@ func getCommonNodesByStorageClasses(scs map[string]*v1.StorageClass, nodesWithLV
 	return result, nil
 }
 
-func RemoveUnusedLVGs(lvgs map[string]*snc.LvmVolumeGroup, scsLVGs map[string]LVMVolumeGroups) map[string]*snc.LvmVolumeGroup {
-	result := make(map[string]*snc.LvmVolumeGroup, len(lvgs))
+func RemoveUnusedLVGs(lvgs map[string]*snc.LVMVolumeGroup, scsLVGs map[string]LVMVolumeGroups) map[string]*snc.LVMVolumeGroup {
+	result := make(map[string]*snc.LVMVolumeGroup, len(lvgs))
 	usedLvgs := make(map[string]struct{}, len(lvgs))
 
 	for _, scLvgs := range scsLVGs {
@@ -656,8 +656,8 @@ func ExtractLVGsFromSC(sc *v1.StorageClass) (LVMVolumeGroups, error) {
 	return lvmVolumeGroups, nil
 }
 
-func SortLVGsByNodeName(lvgs map[string]*snc.LvmVolumeGroup) map[string][]*snc.LvmVolumeGroup {
-	sorted := make(map[string][]*snc.LvmVolumeGroup, len(lvgs))
+func SortLVGsByNodeName(lvgs map[string]*snc.LVMVolumeGroup) map[string][]*snc.LVMVolumeGroup {
+	sorted := make(map[string][]*snc.LVMVolumeGroup, len(lvgs))
 	for _, lvg := range lvgs {
 		for _, node := range lvg.Status.Nodes {
 			sorted[node.Name] = append(sorted[node.Name], lvg)
diff --git a/images/webhooks/src/go.mod b/images/webhooks/src/go.mod
index 6ffbb8ce..5cd4ff22 100644
--- a/images/webhooks/src/go.mod
+++ b/images/webhooks/src/go.mod
@@ -4,7 +4,7 @@ go 1.22.3
 
 require (
 	github.com/deckhouse/sds-local-volume/api v0.0.0-20240813100234-cf7ae5802ee1
-	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b
+	github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77
 	github.com/sirupsen/logrus v1.9.3
 	github.com/slok/kubewebhook/v2 v2.6.0
 	k8s.io/api v0.30.3
diff --git a/images/webhooks/src/go.sum b/images/webhooks/src/go.sum
index 2003d4fd..ad490e0c 100644
--- a/images/webhooks/src/go.sum
+++ b/images/webhooks/src/go.sum
@@ -5,6 +5,8 @@ github.com/deckhouse/sds-local-volume/api v0.0.0-20240813100234-cf7ae5802ee1 h1:
 github.com/deckhouse/sds-local-volume/api v0.0.0-20240813100234-cf7ae5802ee1/go.mod h1:cYxHYJmIl6g9lXb1etqmLeQL/vsPMgscmact/FObd+U=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b h1:EYmHWTWcWMpyxJGZK05ZxlIFnh9s66DRrxLw/LNb/xw=
 github.com/deckhouse/sds-node-configurator/api v0.0.0-20240805103635-969dc811217b/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77 h1:Y3vswUk/rnCpkZzWBk+Mlr9LtMg6EI5LkQ4GvgHCslI=
+github.com/deckhouse/sds-node-configurator/api v0.0.0-20240919102704-a035b4a92e77/go.mod h1:H71+9G0Jr46Qs0BA3z3/xt0h9lbnJnCEYcaCJCWFBf0=
 github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
 github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
diff --git a/images/webhooks/src/handlers/func.go b/images/webhooks/src/handlers/func.go
index 1bd4fd7e..9f7d5cf7 100644
--- a/images/webhooks/src/handlers/func.go
+++ b/images/webhooks/src/handlers/func.go
@@ -37,7 +37,6 @@ import (
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-
 	mc "webhooks/api"
 )
 
diff --git a/images/webhooks/src/handlers/lscValidator.go b/images/webhooks/src/handlers/lscValidator.go
index 434e5759..e703df81 100644
--- a/images/webhooks/src/handlers/lscValidator.go
+++ b/images/webhooks/src/handlers/lscValidator.go
@@ -30,7 +30,6 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-
 	mc "webhooks/api"
 )
 
@@ -50,7 +49,7 @@ func LSCValidate(ctx context.Context, _ *model.AdmissionReview, obj metav1.Objec
 		klog.Fatal(err)
 	}
 
-	listDevice := &snc.LvmVolumeGroupList{}
+	listDevice := &snc.LVMVolumeGroupList{}
 
 	err = cl.List(ctx, listDevice)
 	if err != nil {
diff --git a/images/webhooks/src/main.go b/images/webhooks/src/main.go
index 3697cdf4..c4cd9a9a 100644
--- a/images/webhooks/src/main.go
+++ b/images/webhooks/src/main.go
@@ -27,7 +27,6 @@ import (
 	kwhlogrus "github.com/slok/kubewebhook/v2/pkg/log/logrus"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-
 	"webhooks/handlers"
 )
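
For reviewers: below is a minimal sketch, not part of the patch, of how consumer code looks after the migration. The type and field names are taken from the hunks above (see GetLLVSpec in images/sds-local-volume-csi/pkg/utils/func.go); the v1alpha1 import path and the helper names listLVGNames/newLLVSpec are assumptions for illustration only.

package example

import (
	"context"

	snc "github.com/deckhouse/sds-node-configurator/api/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listLVGNames (hypothetical helper) lists LVMVolumeGroup resources using the
// renamed types: snc.LVMVolumeGroupList and snc.LVMVolumeGroup replace the old
// snc.LvmVolumeGroupList and snc.LvmVolumeGroup.
func listLVGNames(ctx context.Context, cl client.Client) ([]string, error) {
	lvgList := &snc.LVMVolumeGroupList{}
	if err := cl.List(ctx, lvgList); err != nil {
		return nil, err
	}

	names := make([]string, 0, len(lvgList.Items))
	for _, lvg := range lvgList.Items {
		names = append(names, lvg.Name)
	}
	return names, nil
}

// newLLVSpec (hypothetical helper) builds an LVMLogicalVolumeSpec with the
// renamed field LVMVolumeGroupName (formerly LvmVolumeGroupName), mirroring
// what GetLLVSpec does in the diff.
func newLLVSpec(lvName, lvmType, size string, lvg snc.LVMVolumeGroup) snc.LVMLogicalVolumeSpec {
	return snc.LVMLogicalVolumeSpec{
		ActualLVNameOnTheNode: lvName,
		Type:                  lvmType,
		Size:                  size,
		LVMVolumeGroupName:    lvg.Name,
	}
}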