diff --git a/images/sds-local-volume-controller/api/v1alpha1/lvm_volume_group.go b/images/sds-local-volume-controller/api/v1alpha1/lvm_volume_group.go index d931267f..70900f72 100644 --- a/images/sds-local-volume-controller/api/v1alpha1/lvm_volume_group.go +++ b/images/sds-local-volume-controller/api/v1alpha1/lvm_volume_group.go @@ -1,9 +1,12 @@ /* Copyright 2024 Flant JSC + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -33,16 +36,24 @@ type LvmVolumeGroup struct { Status LvmVolumeGroupStatus `json:"status,omitempty"` } -type SpecThinPool struct { - Name string `json:"name"` - Size resource.Quantity `json:"size"` +type LvmVolumeGroupSpec struct { + ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"` + BlockDeviceNames []string `json:"blockDeviceNames"` + ThinPools []LvmVolumeGroupThinPoolSpec `json:"thinPools"` + Type string `json:"type"` } -type LvmVolumeGroupSpec struct { - ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"` - BlockDeviceNames []string `json:"blockDeviceNames"` - ThinPools []SpecThinPool `json:"thinPools"` - Type string `json:"type"` +type LvmVolumeGroupStatus struct { + AllocatedSize resource.Quantity `json:"allocatedSize"` + Nodes []LvmVolumeGroupNode `json:"nodes"` + ThinPools []LvmVolumeGroupThinPoolStatus `json:"thinPools"` + VGSize resource.Quantity `json:"vgSize"` + VGUuid string `json:"vgUUID"` + Phase string `json:"phase"` + Conditions []metav1.Condition `json:"conditions"` + ThinPoolReady string `json:"thinPoolReady"` + ConfigurationApplied string `json:"configurationApplied"` + VGFree resource.Quantity `json:"vgFree"` } type LvmVolumeGroupDevice struct { @@ -58,18 +69,19 @@ type LvmVolumeGroupNode struct { Name string `json:"name"` } -type StatusThinPool struct { - Name string `json:"name"` - ActualSize resource.Quantity `json:"actualSize"` - UsedSize resource.Quantity `json:"usedSize"` +type LvmVolumeGroupThinPoolStatus struct { + Name string `json:"name"` + ActualSize resource.Quantity `json:"actualSize"` + UsedSize resource.Quantity `json:"usedSize"` + AllocatedSize resource.Quantity `json:"allocatedSize"` + AvailableSpace resource.Quantity `json:"availableSpace"` + AllocationLimit string `json:"allocationLimit"` + Ready bool `json:"ready"` + Message string `json:"message"` } -type LvmVolumeGroupStatus struct { - AllocatedSize resource.Quantity `json:"allocatedSize"` - Health string `json:"health"` - Message string `json:"message"` - Nodes []LvmVolumeGroupNode `json:"nodes"` - ThinPools []StatusThinPool `json:"thinPools"` - VGSize resource.Quantity `json:"vgSize"` - VGUuid string `json:"vgUUID"` +type LvmVolumeGroupThinPoolSpec struct { + Name string `json:"name"` + Size resource.Quantity `json:"size"` + AllocationLimit string `json:"allocationLimit"` } diff --git a/images/sds-local-volume-csi/Dockerfile b/images/sds-local-volume-csi/Dockerfile index 5b781991..04b605c2 100644 --- a/images/sds-local-volume-csi/Dockerfile +++ b/images/sds-local-volume-csi/Dockerfile @@ -1,7 +1,7 @@ ARG BASE_ALPINE=registry.deckhouse.io/base_images/alpine:3.16.3@sha256:5548e9172c24a1b0ca9afdd2bf534e265c94b12b36b3e0c0302f5853eaf00abb -ARG 
BASE_GOLANG_21_ALPINE_BUILDER=registry.deckhouse.io/base_images/golang:1.21.4-alpine3.18@sha256:cf84f3d6882c49ea04b6478ac514a2582c8922d7e5848b43d2918fff8329f6e6 +ARG BASE_GOLANG_ALPINE_BUILDER=registry.deckhouse.io/base_images/golang:1.21.4-alpine3.18@sha256:cf84f3d6882c49ea04b6478ac514a2582c8922d7e5848b43d2918fff8329f6e6 -FROM $BASE_GOLANG_21_ALPINE_BUILDER as builder +FROM $BASE_GOLANG_ALPINE_BUILDER as builder WORKDIR /go/src diff --git a/images/sds-local-volume-csi/api/v1alpha1/lvm_volume_group.go b/images/sds-local-volume-csi/api/v1alpha1/lvm_volume_group.go index 4e346fc2..70900f72 100644 --- a/images/sds-local-volume-csi/api/v1alpha1/lvm_volume_group.go +++ b/images/sds-local-volume-csi/api/v1alpha1/lvm_volume_group.go @@ -36,16 +36,24 @@ type LvmVolumeGroup struct { Status LvmVolumeGroupStatus `json:"status,omitempty"` } -type SpecThinPool struct { - Name string `json:"name"` - Size resource.Quantity `json:"size"` +type LvmVolumeGroupSpec struct { + ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"` + BlockDeviceNames []string `json:"blockDeviceNames"` + ThinPools []LvmVolumeGroupThinPoolSpec `json:"thinPools"` + Type string `json:"type"` } -type LvmVolumeGroupSpec struct { - ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"` - BlockDeviceNames []string `json:"blockDeviceNames"` - ThinPools []SpecThinPool `json:"thinPools"` - Type string `json:"type"` +type LvmVolumeGroupStatus struct { + AllocatedSize resource.Quantity `json:"allocatedSize"` + Nodes []LvmVolumeGroupNode `json:"nodes"` + ThinPools []LvmVolumeGroupThinPoolStatus `json:"thinPools"` + VGSize resource.Quantity `json:"vgSize"` + VGUuid string `json:"vgUUID"` + Phase string `json:"phase"` + Conditions []metav1.Condition `json:"conditions"` + ThinPoolReady string `json:"thinPoolReady"` + ConfigurationApplied string `json:"configurationApplied"` + VGFree resource.Quantity `json:"vgFree"` } type LvmVolumeGroupDevice struct { @@ -61,18 +69,19 @@ type LvmVolumeGroupNode struct { Name string `json:"name"` } -type StatusThinPool struct { - Name string `json:"name"` - ActualSize resource.Quantity `json:"actualSize"` - UsedSize resource.Quantity `json:"usedSize"` +type LvmVolumeGroupThinPoolStatus struct { + Name string `json:"name"` + ActualSize resource.Quantity `json:"actualSize"` + UsedSize resource.Quantity `json:"usedSize"` + AllocatedSize resource.Quantity `json:"allocatedSize"` + AvailableSpace resource.Quantity `json:"availableSpace"` + AllocationLimit string `json:"allocationLimit"` + Ready bool `json:"ready"` + Message string `json:"message"` } -type LvmVolumeGroupStatus struct { - AllocatedSize resource.Quantity `json:"allocatedSize"` - Health string `json:"health"` - Message string `json:"message"` - Nodes []LvmVolumeGroupNode `json:"nodes"` - ThinPools []StatusThinPool `json:"thinPools"` - VGSize resource.Quantity `json:"vgSize"` - VGUuid string `json:"vgUUID"` +type LvmVolumeGroupThinPoolSpec struct { + Name string `json:"name"` + Size resource.Quantity `json:"size"` + AllocationLimit string `json:"allocationLimit"` } diff --git a/images/sds-local-volume-csi/cmd/main.go b/images/sds-local-volume-csi/cmd/main.go index 6a999c0d..06b6edaa 100644 --- a/images/sds-local-volume-csi/cmd/main.go +++ b/images/sds-local-volume-csi/cmd/main.go @@ -56,7 +56,7 @@ func main() { cfgParams, err := config.NewConfig() if err != nil { - klog.Fatal("unable to create NewConfig") + klog.Fatalf("unable to create NewConfig, err: %s", err.Error()) } var ( diff --git a/images/sds-local-volume-csi/pkg/utils/func.go 
b/images/sds-local-volume-csi/pkg/utils/func.go
index 6f084873..7408ed9e 100644
--- a/images/sds-local-volume-csi/pkg/utils/func.go
+++ b/images/sds-local-volume-csi/pkg/utils/func.go
@@ -178,7 +178,7 @@ func GetNodeWithMaxFreeSpace(lvgs []v1alpha1.LvmVolumeGroup, storageClassLVGPara
     switch lvmType {
     case internal.LVMTypeThick:
-        freeSpace = GetLVMVolumeGroupFreeSpace(lvg)
+        freeSpace = lvg.Status.VGFree
     case internal.LVMTypeThin:
         thinPoolName, ok := storageClassLVGParametersMap[lvg.Name]
         if !ok {
@@ -199,6 +199,7 @@ func GetNodeWithMaxFreeSpace(lvgs []v1alpha1.LvmVolumeGroup, storageClassLVGPara
     return nodeName, *resource.NewQuantity(maxFreeSpace, resource.BinarySI), nil
 }
 
+// TODO: delete the method below?
 func GetLVMVolumeGroupParams(ctx context.Context, kc client.Client, log logger.Logger, lvmVG map[string]string, nodeName, LvmType string) (lvgName, vgName string, err error) {
     listLvgs := &v1alpha1.LvmVolumeGroupList{
         TypeMeta: metav1.TypeMeta{
@@ -273,7 +274,7 @@ func GetLVMVolumeGroupFreeSpace(lvg v1alpha1.LvmVolumeGroup) (vgFreeSpace resour
 }
 
 func GetLVMThinPoolFreeSpace(lvg v1alpha1.LvmVolumeGroup, thinPoolName string) (thinPoolFreeSpace resource.Quantity, err error) {
-    var storagePoolThinPool *v1alpha1.StatusThinPool
+    var storagePoolThinPool *v1alpha1.LvmVolumeGroupThinPoolStatus
     for _, thinPool := range lvg.Status.ThinPools {
         if thinPool.Name == thinPoolName {
             storagePoolThinPool = &thinPool
@@ -285,11 +286,7 @@ func GetLVMThinPoolFreeSpace(lvg v1alpha1.LvmVolumeGroup, thinPoolName string) (
         return thinPoolFreeSpace, fmt.Errorf("[GetLVMThinPoolFreeSpace] thin pool %s not found in lvg %+v", thinPoolName, lvg)
     }
 
-    thinPoolActualSize := storagePoolThinPool.ActualSize
-
-    thinPoolFreeSpace = thinPoolActualSize.DeepCopy()
-    thinPoolFreeSpace.Sub(storagePoolThinPool.UsedSize)
-    return thinPoolFreeSpace, nil
+    return storagePoolThinPool.AvailableSpace, nil
 }
 
 func UpdateLVMLogicalVolume(ctx context.Context, kc client.Client, llv *v1alpha1.LVMLogicalVolume) error {
diff --git a/images/sds-local-volume-scheduler-extender/pkg/cache/cache.go b/images/sds-local-volume-scheduler-extender/pkg/cache/cache.go
index eb4c5a30..e10a7c45 100644
--- a/images/sds-local-volume-scheduler-extender/pkg/cache/cache.go
+++ b/images/sds-local-volume-scheduler-extender/pkg/cache/cache.go
@@ -74,8 +74,8 @@ func (c *Cache) AddLVG(lvg *v1alpha1.LvmVolumeGroup) {
 
 // UpdateLVG updated selected LVMVolumeGroup resource in the cache. If such LVMVolumeGroup is not stored, returns an error.
 func (c *Cache) UpdateLVG(lvg *v1alpha1.LvmVolumeGroup) error {
-    if cache, found := c.lvgs.Load(lvg.Name); found {
-        cache.(*lvgCache).lvg = lvg
+    if lvgCh, found := c.lvgs.Load(lvg.Name); found {
+        lvgCh.(*lvgCache).lvg = lvg
 
         c.log.Trace(fmt.Sprintf("[UpdateLVG] the LVMVolumeGroup %s nodes: %v", lvg.Name, lvg.Status.Nodes))
         for _, node := range lvg.Status.Nodes {
@@ -96,7 +96,7 @@ func (c *Cache) UpdateLVG(lvg *v1alpha1.LvmVolumeGroup) error {
         return nil
     }
 
-    return fmt.Errorf("the LVMVolumeGroup %s was not found in the cache", lvg.Name)
+    return fmt.Errorf("the LVMVolumeGroup %s was not found in the lvgCh", lvg.Name)
 }
 
 // TryGetLVG returns selected LVMVolumeGroup resource if it is stored in the cache, otherwise returns nil.
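The free-space helpers become trivial with the reworked status: Thick capacity is now read from status.vgFree and Thin capacity from each thin pool's availableSpace, instead of being recomputed from ActualSize and UsedSize. A minimal sketch under those assumptions (the freeSpaceFor helper and the import path are hypothetical, not part of this patch):

package utils

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"

	v1alpha1 "sds-local-volume-csi/api/v1alpha1" // import path assumed
)

// freeSpaceFor is a hypothetical helper illustrating the new Status fields:
// an empty thinPoolName means Thick and reads status.vgFree directly,
// otherwise the thin pool's availableSpace is returned as published by the controller.
func freeSpaceFor(lvg v1alpha1.LvmVolumeGroup, thinPoolName string) (resource.Quantity, error) {
	if thinPoolName == "" {
		// Thick: the controller now publishes the free space in status.vgFree.
		return lvg.Status.VGFree, nil
	}

	// Thin: look the pool up in the per-pool status list.
	for _, tp := range lvg.Status.ThinPools {
		if tp.Name == thinPoolName {
			return tp.AvailableSpace, nil
		}
	}

	return resource.Quantity{}, fmt.Errorf("thin pool %s not found in LVMVolumeGroup %s", thinPoolName, lvg.Name)
}

GetNodeWithMaxFreeSpace and GetLVMThinPoolFreeSpace in the hunks above follow the same pattern.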
@@ -231,29 +231,6 @@ func (c *Cache) AddThickPVC(lvgName string, pvc *v1.PersistentVolumeClaim) error c.log.Debug(fmt.Sprintf("[AddThickPVC] PVC %s should not be added", pvcKey)) return nil } - //if pvc.Annotations[SelectedNodeAnnotation] != "" { - // c.log.Debug(fmt.Sprintf("[AddThickPVC] PVC %s/%s has selected node anotation, selected node: %s", pvc.Namespace, pvc.Name, pvc.Annotations[SelectedNodeAnnotation])) - // - // lvgsOnTheNode, found := c.nodeLVGs.Load(pvc.Annotations[SelectedNodeAnnotation]) - // if !found { - // err := fmt.Errorf("no LVMVolumeGroups found for the node %s", pvc.Annotations[SelectedNodeAnnotation]) - // c.log.Error(err, fmt.Sprintf("[AddThickPVC] an error occured while trying to add PVC %s to the cache", pvcKey)) - // return err - // } - // - // if !slices2.Contains(lvgsOnTheNode.([]string), lvgName) { - // c.log.Debug(fmt.Sprintf("[AddThickPVC] LVMVolumeGroup %s does not belong to PVC %s/%s selected node %s. It will be skipped", lvgName, pvc.Namespace, pvc.Name, pvc.Annotations[SelectedNodeAnnotation])) - // return nil - // } - // - // c.log.Debug(fmt.Sprintf("[AddThickPVC] LVMVolumeGroup %s belongs to PVC %s/%s selected node %s", lvgName, pvc.Namespace, pvc.Name, pvc.Annotations[SelectedNodeAnnotation])) - // - // _, found = lvgCh.(*lvgCache).thickPVCs.Load(pvcKey) - // if found { - // c.log.Warning(fmt.Sprintf("[AddThickPVC] PVC %s cache has been already added to the LVMVolumeGroup %s", pvcKey, lvgName)) - // return nil - // } - //} c.log.Debug(fmt.Sprintf("[AddThickPVC] new PVC %s cache will be added to the LVMVolumeGroup %s", pvcKey, lvgName)) c.addNewThickPVC(lvgCh.(*lvgCache), pvc) @@ -279,12 +256,14 @@ func (c *Cache) shouldAddPVC(pvc *v1.PersistentVolumeClaim, lvgCh *lvgCache, pvc c.log.Debug(fmt.Sprintf("[shouldAddPVC] LVMVolumeGroup %s belongs to PVC %s/%s selected node %s", lvgName, pvc.Namespace, pvc.Name, pvc.Annotations[SelectedNodeAnnotation])) + // if pvc is thick _, found = lvgCh.thickPVCs.Load(pvcKey) if found { c.log.Debug(fmt.Sprintf("[shouldAddPVC] PVC %s was found in the cache of the LVMVolumeGroup %s", pvcKey, lvgName)) return false, nil } + // if pvc is thin if thinPoolName != "" { thinPoolCh, found := lvgCh.thinPools.Load(thinPoolName) if !found { @@ -324,7 +303,7 @@ func (c *Cache) AddThinPVC(lvgName, thinPoolName string, pvc *v1.PersistentVolum return err } - // this case might be triggered if the extender recovers after fail and finds some pending thickPVCs with selected nodes + // this case might be triggered if the extender recovers after fail and finds some pending thin PVCs with selected nodes c.log.Trace(fmt.Sprintf("[AddThinPVC] PVC %s/%s annotations: %v", pvc.Namespace, pvc.Name, pvc.Annotations)) shouldAdd, err := c.shouldAddPVC(pvc, lvgCh.(*lvgCache), pvcKey, lvgName, thinPoolName) if err != nil { @@ -366,13 +345,7 @@ func (c *Cache) addNewThickPVC(lvgCh *lvgCache, pvc *v1.PersistentVolumeClaim) { func (c *Cache) addNewThinPVC(lvgCh *lvgCache, pvc *v1.PersistentVolumeClaim, thinPoolName string) error { pvcKey := configurePVCKey(pvc) - if thinPoolName == "" { - err := errors.New("no thin pool specified") - c.log.Error(err, fmt.Sprintf("[addNewThinPVC] unable to add Thin PVC %s to the cache", pvcKey)) - return err - } - - err := c.addNewThinPool(lvgCh, pvc, thinPoolName) + err := c.addThinPoolIfNotExists(lvgCh, thinPoolName) if err != nil { c.log.Error(err, fmt.Sprintf("[addNewThinPVC] unable to add Thin pool %s in the LVMVolumeGroup %s cache for PVC %s", thinPoolName, lvgCh.lvg.Name, pvc.Name)) return err @@ -431,7 +404,7 @@ 
func (c *Cache) UpdateThickPVC(lvgName string, pvc *v1.PersistentVolumeClaim) er return nil } -func (c *Cache) UpdateThinPVC(lvgName string, pvc *v1.PersistentVolumeClaim, thinPoolName string) error { +func (c *Cache) UpdateThinPVC(lvgName, thinPoolName string, pvc *v1.PersistentVolumeClaim) error { pvcKey := configurePVCKey(pvc) lvgCh, found := c.lvgs.Load(lvgName) @@ -442,7 +415,7 @@ func (c *Cache) UpdateThinPVC(lvgName string, pvc *v1.PersistentVolumeClaim, thi thinPoolCh, found := lvgCh.(*lvgCache).thinPools.Load(thinPoolName) if !found { c.log.Debug(fmt.Sprintf("[UpdateThinPVC] Thin Pool %s was not found in the LVMVolumeGroup %s, add it.", thinPoolName, lvgName)) - err := c.addNewThinPool(lvgCh.(*lvgCache), pvc, thinPoolName) + err := c.addThinPoolIfNotExists(lvgCh.(*lvgCache), thinPoolName) if err != nil { return err } @@ -467,20 +440,17 @@ func (c *Cache) UpdateThinPVC(lvgName string, pvc *v1.PersistentVolumeClaim, thi return nil } -func (c *Cache) addNewThinPool(lvgCh *lvgCache, pvc *v1.PersistentVolumeClaim, thinPoolName string) error { - pvcKey := configurePVCKey(pvc) - +func (c *Cache) addThinPoolIfNotExists(lvgCh *lvgCache, thinPoolName string) error { if len(thinPoolName) == 0 { err := errors.New("no thin pool name specified") - c.log.Error(err, fmt.Sprintf("[addNewThinPool] unable to add thin pool for PVC %s in the LVMVolumeGroup %s", pvc.Name, lvgCh.lvg.Name)) + c.log.Error(err, fmt.Sprintf("[addThinPoolIfNotExists] unable to add thin pool in the LVMVolumeGroup %s", lvgCh.lvg.Name)) return err } _, found := lvgCh.thinPools.Load(thinPoolName) if found { - err := fmt.Errorf("thin pool %s is already created", thinPoolName) - c.log.Error(err, fmt.Sprintf("[addNewThinPool] unable to add new Thin pool %s to the LVMVolumeGroup %s for PVC %s", thinPoolName, lvgCh.lvg.Name, pvcKey)) - return err + c.log.Debug(fmt.Sprintf("[addThinPoolIfNotExists] Thin pool %s is already created in the LVMVolumeGroup %s. No need to add a new one", thinPoolName, lvgCh.lvg.Name)) + return nil } lvgCh.thinPools.Store(thinPoolName, &thinPoolCache{}) @@ -496,6 +466,7 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er return nil, err } + // TODO: fix this to struct size field after refactoring size := 0 lvgCh.(*lvgCache).thickPVCs.Range(func(key, value any) bool { size++ @@ -510,11 +481,13 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er }) result := make([]*v1.PersistentVolumeClaim, 0, size) + // collect Thick PVC for the LVG lvgCh.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool { result = append(result, pvcCh.(*pvcCache).pvc) return true }) + // collect Thin PVC for the LVG lvgCh.(*lvgCache).thinPools.Range(func(tpName, tpCh any) bool { tpCh.(*thinPoolCache).pvcs.Range(func(pvcName, pvcCh any) bool { result = append(result, pvcCh.(*pvcCache).pvc) @@ -526,18 +499,37 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er return result, nil } -// GetAllThinPVCForLVGThinPool returns slice of PVC belonging to selected LVMVolumeGroup resource. If such LVMVolumeGroup is not stored in the cache, returns an error. -func (c *Cache) GetAllThinPVCForLVGThinPool(lvgName, thinPoolName string) ([]*v1.PersistentVolumeClaim, error) { +// GetAllThickPVCLVG returns slice of PVC belonging to selected LVMVolumeGroup resource. If such LVMVolumeGroup is not stored in the cache, returns an error. 
+func (c *Cache) GetAllThickPVCLVG(lvgName string) ([]*v1.PersistentVolumeClaim, error) {
     lvgCh, found := c.lvgs.Load(lvgName)
     if !found {
         err := fmt.Errorf("cache was not found for the LVMVolumeGroup %s", lvgName)
-        c.log.Error(err, fmt.Sprintf("[GetAllThinPVCForLVGThinPool] an error occured while trying to get all PVC for the LVMVolumeGroup %s", lvgName))
+        c.log.Error(err, fmt.Sprintf("[GetAllThickPVCLVG] an error occurred while trying to get all PVC for the LVMVolumeGroup %s", lvgName))
+        return nil, err
+    }
+
+    result := make([]*v1.PersistentVolumeClaim, 0, pvcPerLVGCount)
+    // collect Thick PVC for the LVG
+    lvgCh.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool {
+        result = append(result, pvcCh.(*pvcCache).pvc)
+        return true
+    })
+
+    return result, nil
+}
+
+// GetAllPVCFromLVGThinPool returns slice of PVC belonging to selected LVMVolumeGroup resource. If such LVMVolumeGroup is not stored in the cache, returns an error.
+func (c *Cache) GetAllPVCFromLVGThinPool(lvgName, thinPoolName string) ([]*v1.PersistentVolumeClaim, error) {
+    lvgCh, found := c.lvgs.Load(lvgName)
+    if !found {
+        err := fmt.Errorf("cache was not found for the LVMVolumeGroup %s", lvgName)
+        c.log.Error(err, fmt.Sprintf("[GetAllPVCFromLVGThinPool] an error occurred while trying to get all PVC for the LVMVolumeGroup %s", lvgName))
         return nil, err
     }
 
     thinPoolCh, found := lvgCh.(*lvgCache).thinPools.Load(thinPoolName)
     if !found || thinPoolCh == nil {
-        c.log.Debug(fmt.Sprintf("[GetAllThinPVCForLVGThinPool] no Thin pool %s in the LVMVolumeGroup %s was found. Returns nil slice", thinPoolName, lvgName))
+        c.log.Debug(fmt.Sprintf("[GetAllPVCFromLVGThinPool] no Thin pool %s in the LVMVolumeGroup %s was found. Returns nil slice", thinPoolName, lvgName))
         return nil, nil
     }
 
@@ -562,68 +554,6 @@ func (c *Cache) GetLVGNamesForPVC(pvc *v1.PersistentVolumeClaim) []string {
     return lvgNames.([]string)
 }
 
-// RemoveBoundedThickPVCSpaceReservation removes selected bounded PVC space reservation from a target LVMVolumeGroup resource. If no such LVMVolumeGroup found or PVC
-// is not in a Status Bound, returns an error.
-func (c *Cache) RemoveBoundedThickPVCSpaceReservation(lvgName string, pvc *v1.PersistentVolumeClaim) error {
-    if pvc.Status.Phase != v1.ClaimBound {
-        return fmt.Errorf("PVC %s/%s not in a Status.Phase Bound", pvc.Namespace, pvc.Name)
-    }
-
-    pvcKey := configurePVCKey(pvc)
-    lvgCh, found := c.lvgs.Load(lvgName)
-    if !found {
-        err := fmt.Errorf("LVMVolumeGroup %s was not found in the cache", lvgName)
-        c.log.Error(err, fmt.Sprintf("[RemoveBoundedThickPVCSpaceReservation] an error occured while trying to remove space reservation for PVC %s in the LVMVolumeGroup %s", pvcKey, lvgName))
-        return err
-    }
-
-    pvcCh, found := lvgCh.(*lvgCache).thickPVCs.Load(pvcKey)
-    if !found || pvcCh == nil {
-        err := fmt.Errorf("cache for PVC %s was not found", pvcKey)
-        c.log.Error(err, fmt.Sprintf("[RemoveBoundedThickPVCSpaceReservation] an error occured while trying to remove space reservation for PVC %s in the LVMVolumeGroup %s", pvcKey, lvgName))
-        return err
-    }
-
-    lvgCh.(*lvgCache).thickPVCs.Delete(pvcKey)
-    c.pvcLVGs.Delete(pvcKey)
-
-    return nil
-}
-
-// RemoveBoundedThinPVCSpaceReservation removes selected bounded PVC space reservation from a target LVMVolumeGroup resource. If no such LVMVolumeGroup found or PVC
-// is not in a Status Bound, returns an error.
-func (c *Cache) RemoveBoundedThinPVCSpaceReservation(lvgName, thinPoolName string, pvc *v1.PersistentVolumeClaim) error { - if pvc.Status.Phase != v1.ClaimBound { - return fmt.Errorf("PVC %s/%s not in a Status.Phase Bound", pvc.Namespace, pvc.Name) - } - - pvcKey := configurePVCKey(pvc) - lvgCh, found := c.lvgs.Load(lvgName) - if !found { - err := fmt.Errorf("LVMVolumeGroup %s was not found in the cache", lvgName) - c.log.Error(err, fmt.Sprintf("[RemoveBoundedThinPVCSpaceReservation] an error occured while trying to remove space reservation for PVC %s in the LVMVolumeGroup %s", pvcKey, lvgName)) - return err - } - - thinPoolCh, found := lvgCh.(*lvgCache).thinPools.Load(thinPoolName) - if !found { - c.log.Warning(fmt.Sprintf("[RemoveBoundedThinPVCSpaceReservation] no Thin Pool %s was found in the cache for the LVMVolumeGroup %s", thinPoolName, lvgName)) - return nil - } - - pvcCh, found := thinPoolCh.(*thinPoolCache).pvcs.Load(pvcKey) - if !found || pvcCh == nil { - err := fmt.Errorf("cache for PVC %s was not found", pvcKey) - c.log.Error(err, fmt.Sprintf("[RemoveBoundedThinPVCSpaceReservation] an error occured while trying to remove space reservation for PVC %s in the LVMVolumeGroup %s", pvcKey, lvgName)) - return err - } - - thinPoolCh.(*thinPoolCache).pvcs.Delete(pvcKey) - c.pvcLVGs.Delete(pvcKey) - - return nil -} - // CheckIsPVCStored checks if selected PVC has been already stored in the cache. func (c *Cache) CheckIsPVCStored(pvc *v1.PersistentVolumeClaim) bool { pvcKey := configurePVCKey(pvc) @@ -637,6 +567,7 @@ func (c *Cache) CheckIsPVCStored(pvc *v1.PersistentVolumeClaim) bool { // RemoveSpaceReservationForPVCWithSelectedNode removes space reservation for selected PVC for every LVMVolumeGroup resource, which is not bound to the PVC selected node. 
func (c *Cache) RemoveSpaceReservationForPVCWithSelectedNode(pvc *v1.PersistentVolumeClaim, deviceType string) error { pvcKey := configurePVCKey(pvc) + // the LVG which is used to store PVC selectedLVGName := "" lvgNamesForPVC, found := c.pvcLVGs.Load(pvcKey) diff --git a/images/sds-local-volume-scheduler-extender/pkg/cache/cache_test.go b/images/sds-local-volume-scheduler-extender/pkg/cache/cache_test.go index 31816c1e..9d4d742f 100644 --- a/images/sds-local-volume-scheduler-extender/pkg/cache/cache_test.go +++ b/images/sds-local-volume-scheduler-extender/pkg/cache/cache_test.go @@ -562,11 +562,19 @@ func BenchmarkCache_FullLoad(b *testing.B) { }, } - for _, pvc := range pvcs { + for d, pvc := range pvcs { for err != nil { err = cache.UpdateThickPVC(lvg.Name, pvc) } + for err != nil { + err = cache.AddThinPVC(lvg.Name, fmt.Sprintf("test-thin-%d", d), pvc) + } + + for err != nil { + err = cache.UpdateThinPVC(lvg.Name, fmt.Sprintf("test-thin-%d", d), pvc) + } + cache.GetLVGNamesForPVC(pvc) } } @@ -581,6 +589,10 @@ func BenchmarkCache_FullLoad(b *testing.B) { if err != nil { b.Error(err) } + _, err = cache.GetLVGThinReservedSpace(lvgName, "test-thin") + if err != nil { + b.Error(err) + } } cache.GetLVGNamesByNodeName(nodeName) diff --git a/images/sds-local-volume-scheduler-extender/pkg/controller/lvg_watcher_cache.go b/images/sds-local-volume-scheduler-extender/pkg/controller/lvg_watcher_cache.go index f9ce77bb..ffc8d69c 100644 --- a/images/sds-local-volume-scheduler-extender/pkg/controller/lvg_watcher_cache.go +++ b/images/sds-local-volume-scheduler-extender/pkg/controller/lvg_watcher_cache.go @@ -133,16 +133,7 @@ func RunLVGWatcherCacheController( log.Trace(fmt.Sprintf("[RunLVGWatcherCacheController] PVC %s/%s has status phase %s", pvc.Namespace, pvc.Name, pvc.Status.Phase)) if pvc.Status.Phase == v1.ClaimBound { log.Debug(fmt.Sprintf("[RunLVGWatcherCacheController] PVC %s/%s from the cache has Status.Phase Bound. 
It will be removed from the reserved space in the LVMVolumeGroup %s", pvc.Namespace, pvc.Name, newLvg.Name)) - //err = cache.RemoveBoundedThickPVCSpaceReservation(newLvg.Name, pvc) - //if err != nil { - // log.Error(err, fmt.Sprintf("[RunLVGWatcherCacheController] unable to remove PVC %s/%s from the cache in the LVMVolumeGroup %s", pvc.Namespace, pvc.Name, newLvg.Name)) - // continue - //} - cache.RemovePVCFromTheCache(pvc) - - //err = cache.RemoveBoundedThinPVCSpaceReservation() - log.Debug(fmt.Sprintf("[RunLVGWatcherCacheController] PVC %s/%s was removed from the LVMVolumeGroup %s in the cache", pvc.Namespace, pvc.Name, newLvg.Name)) } } diff --git a/images/sds-local-volume-scheduler-extender/pkg/controller/pvc_watcher_cache.go b/images/sds-local-volume-scheduler-extender/pkg/controller/pvc_watcher_cache.go index 0119084f..af1561c1 100644 --- a/images/sds-local-volume-scheduler-extender/pkg/controller/pvc_watcher_cache.go +++ b/images/sds-local-volume-scheduler-extender/pkg/controller/pvc_watcher_cache.go @@ -118,12 +118,6 @@ func RunPVCWatcherCacheController( } func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, schedulerCache *cache.Cache, pvc *v1.PersistentVolumeClaim, selectedNodeName string) { - log.Debug(fmt.Sprintf("[reconcilePVC] starts to find common LVMVolumeGroup for the selected node %s and PVC %s/%s", selectedNodeName, pvc.Namespace, pvc.Name)) - lvgsOnTheNode := schedulerCache.GetLVGNamesByNodeName(selectedNodeName) - for _, lvgName := range lvgsOnTheNode { - log.Trace(fmt.Sprintf("[reconcilePVC] LVMVolumeGroup %s belongs to the node %s", lvgName, selectedNodeName)) - } - sc := &v12.StorageClass{} err := mgr.GetClient().Get(ctx, client.ObjectKey{ Name: *pvc.Spec.StorageClassName, @@ -138,10 +132,12 @@ func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, s return } + log.Debug(fmt.Sprintf("[reconcilePVC] tries to extract LVGs from the Storage Class %s for PVC %s/%s", sc.Name, pvc.Namespace, pvc.Name)) lvgsFromSc, err := scheduler.ExtractLVGsFromSC(sc) if err != nil { log.Error(err, fmt.Sprintf("[reconcilePVC] unable to extract LVMVolumeGroups from the Storage Class %s", sc.Name)) } + log.Debug(fmt.Sprintf("[reconcilePVC] successfully extracted LVGs from the Storage Class %s for PVC %s/%s", sc.Name, pvc.Namespace, pvc.Name)) lvgsForPVC := schedulerCache.GetLVGNamesForPVC(pvc) if lvgsForPVC == nil || len(lvgsForPVC) == 0 { @@ -155,6 +151,12 @@ func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, s log.Trace(fmt.Sprintf("[reconcilePVC] LVMVolumeGroup %s belongs to PVC %s/%s", lvgName, pvc.Namespace, pvc.Name)) } + log.Debug(fmt.Sprintf("[reconcilePVC] starts to find common LVMVolumeGroup for the selected node %s and PVC %s/%s", selectedNodeName, pvc.Namespace, pvc.Name)) + lvgsOnTheNode := schedulerCache.GetLVGNamesByNodeName(selectedNodeName) + for _, lvgName := range lvgsOnTheNode { + log.Trace(fmt.Sprintf("[reconcilePVC] LVMVolumeGroup %s belongs to the node %s", lvgName, selectedNodeName)) + } + var commonLVGName string for _, pvcLvg := range lvgsForPVC { if slices.Contains(lvgsOnTheNode, pvcLvg) { @@ -181,7 +183,7 @@ func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, s case consts.Thin: for _, lvg := range lvgsFromSc { if lvg.Name == commonLVGName { - err = schedulerCache.UpdateThinPVC(commonLVGName, pvc, lvg.Thin.PoolName) + err = schedulerCache.UpdateThinPVC(commonLVGName, lvg.Thin.PoolName, pvc) if err != nil { log.Error(err, fmt.Sprintf("[reconcilePVC] unable to 
update Thin PVC %s/%s in the cache", pvc.Namespace, pvc.Name)) return diff --git a/images/sds-local-volume-scheduler-extender/pkg/scheduler/filter.go b/images/sds-local-volume-scheduler-extender/pkg/scheduler/filter.go index 39d2f7af..ef55c9f6 100644 --- a/images/sds-local-volume-scheduler-extender/pkg/scheduler/filter.go +++ b/images/sds-local-volume-scheduler-extender/pkg/scheduler/filter.go @@ -92,6 +92,11 @@ func (s *scheduler) filter(w http.ResponseWriter, r *http.Request) { s.log.Trace(fmt.Sprintf("[filter] filtered managed PVC %s/%s", pvc.Namespace, pvc.Name)) } + if len(managedPVCs) == 0 { + s.log.Warning(fmt.Sprintf("[filter] Pod %s/%s uses PVCs which are not managed by the module. Unable to filter and score the nodes", input.Pod.Namespace, input.Pod.Name)) + return + } + s.log.Debug(fmt.Sprintf("[filter] starts to extract PVC requested sizes for a Pod %s/%s", input.Pod.Namespace, input.Pod.Name)) pvcRequests, err := extractRequestedSize(s.ctx, s.client, s.log, managedPVCs, scs) if err != nil { @@ -335,6 +340,7 @@ func filterNodes( } } + // these are the nodes which might store every PVC from the Pod commonNodes, err := getCommonNodesByStorageClasses(scs, nodeLVGs) for nodeName := range commonNodes { log.Trace(fmt.Sprintf("[filterNodes] Node %s is a common for every storage class", nodeName)) @@ -345,9 +351,9 @@ func filterNodes( FailedNodes: FailedNodesMap{}, } - thickFreeMtx := &sync.RWMutex{} - thinFreeMtx := &sync.RWMutex{} - failedNodesMutex := &sync.Mutex{} + thickMapMtx := &sync.RWMutex{} + thinMapMtx := &sync.RWMutex{} + failedNodesMapMtx := &sync.Mutex{} wg := &sync.WaitGroup{} wg.Add(len(nodes.Items)) @@ -363,9 +369,9 @@ func filterNodes( if _, common := commonNodes[node.Name]; !common { log.Debug(fmt.Sprintf("[filterNodes] node %s is not common for used Storage Classes", node.Name)) - failedNodesMutex.Lock() + failedNodesMapMtx.Lock() result.FailedNodes[node.Name] = "node is not common for used Storage Classes" - failedNodesMutex.Unlock() + failedNodesMapMtx.Unlock() return } @@ -373,14 +379,14 @@ func filterNodes( lvgsFromNode := commonNodes[node.Name] hasEnoughSpace := true - // now we iterate all over the PVCs to get if we can place all of them on the node (does the node have enough space) + // now we iterate all over the PVCs to see if we can place all of them on the node (does the node have enough space) for _, pvc := range pvcs { pvcReq := pvcRequests[pvc.Name] // we get LVGs which might be used by the PVC lvgsFromSC := scLVGs[*pvc.Spec.StorageClassName] - // we get the specific LVG which the PVC can use on the node + // we get the specific LVG which the PVC can use on the node as we support only one specified LVG in the Storage Class on each node commonLVG := findMatchedLVG(lvgsFromNode, lvgsFromSC) if commonLVG == nil { err = errors.New(fmt.Sprintf("unable to match Storage Class's LVMVolumeGroup with the node's one, Storage Class: %s, node: %s", *pvc.Spec.StorageClassName, node.Name)) @@ -393,9 +399,9 @@ func filterNodes( switch pvcReq.DeviceType { case consts.Thick: lvg := lvgs[commonLVG.Name] - thickFreeMtx.RLock() + thickMapMtx.RLock() freeSpace := lvgsThickFree[lvg.Name] - thickFreeMtx.RUnlock() + thickMapMtx.RUnlock() log.Trace(fmt.Sprintf("[filterNodes] LVMVolumeGroup %s Thick free space: %s, PVC requested space: %s", lvg.Name, resource.NewQuantity(freeSpace, resource.BinarySI), resource.NewQuantity(pvcReq.RequestedSize, resource.BinarySI))) if freeSpace < pvcReq.RequestedSize { @@ -403,9 +409,9 @@ func filterNodes( break } - thickFreeMtx.Lock() + 
thickMapMtx.Lock() lvgsThickFree[lvg.Name] -= pvcReq.RequestedSize - thickFreeMtx.Unlock() + thickMapMtx.Unlock() case consts.Thin: lvg := lvgs[commonLVG.Name] @@ -417,9 +423,9 @@ func filterNodes( return } - thinFreeMtx.RLock() + thinMapMtx.RLock() freeSpace := lvgsThinFree[lvg.Name][targetThinPool.Name] - thinFreeMtx.RUnlock() + thinMapMtx.RUnlock() log.Trace(fmt.Sprintf("[filterNodes] LVMVolumeGroup %s Thin Pool %s free space: %s, PVC requested space: %s", lvg.Name, targetThinPool.Name, resource.NewQuantity(freeSpace, resource.BinarySI), resource.NewQuantity(pvcReq.RequestedSize, resource.BinarySI))) @@ -428,9 +434,9 @@ func filterNodes( break } - thinFreeMtx.Lock() + thinMapMtx.Lock() lvgsThinFree[lvg.Name][targetThinPool.Name] -= pvcReq.RequestedSize - thinFreeMtx.Unlock() + thinMapMtx.Unlock() } if !hasEnoughSpace { @@ -440,9 +446,9 @@ func filterNodes( } if !hasEnoughSpace { - failedNodesMutex.Lock() + failedNodesMapMtx.Lock() result.FailedNodes[node.Name] = "not enough space" - failedNodesMutex.Unlock() + failedNodesMapMtx.Unlock() return } diff --git a/images/sds-local-volume-scheduler-extender/pkg/scheduler/filter_test.go b/images/sds-local-volume-scheduler-extender/pkg/scheduler/filter_test.go index 42e1c58f..f937d032 100644 --- a/images/sds-local-volume-scheduler-extender/pkg/scheduler/filter_test.go +++ b/images/sds-local-volume-scheduler-extender/pkg/scheduler/filter_test.go @@ -5,6 +5,7 @@ import ( v1 "k8s.io/api/core/v1" v12 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sds-local-volume-scheduler-extender/pkg/consts" "sds-local-volume-scheduler-extender/pkg/logger" "testing" ) @@ -20,13 +21,13 @@ func TestFilter(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: sc1, }, - Provisioner: SdsLocalVolumeProvisioner, + Provisioner: consts.SdsLocalVolumeProvisioner, }, sc2: { ObjectMeta: metav1.ObjectMeta{ Name: sc2, }, - Provisioner: SdsLocalVolumeProvisioner, + Provisioner: consts.SdsLocalVolumeProvisioner, }, sc3: { ObjectMeta: metav1.ObjectMeta{ diff --git a/images/sds-local-volume-scheduler-extender/pkg/scheduler/route.go b/images/sds-local-volume-scheduler-extender/pkg/scheduler/route.go index 8d237701..788ebc7c 100644 --- a/images/sds-local-volume-scheduler-extender/pkg/scheduler/route.go +++ b/images/sds-local-volume-scheduler-extender/pkg/scheduler/route.go @@ -73,7 +73,7 @@ func NewHandler(ctx context.Context, cl client.Client, log logger.Logger, lvgCac }, nil } -func status(w http.ResponseWriter, r *http.Request) { +func status(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) _, err := w.Write([]byte("ok")) if err != nil { @@ -81,45 +81,14 @@ func status(w http.ResponseWriter, r *http.Request) { } } -func (s *scheduler) getCache(w http.ResponseWriter, r *http.Request) { +func (s *scheduler) getCache(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) s.cache.PrintTheCacheLog() - result := make(map[string][]struct { - pvcName string - selectedNode string - status string - size string - }) - lvgs := s.cache.GetAllLVG() for _, lvg := range lvgs { - pvcs, err := s.cache.GetAllPVCForLVG(lvg.Name) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - s.log.Error(err, "something bad") - } - - result[lvg.Name] = make([]struct { - pvcName string - selectedNode string - status string - size string - }, 0) - - for _, pvc := range pvcs { - result[lvg.Name] = append(result[lvg.Name], struct { - pvcName string - selectedNode string - status string - size string - }{pvcName: pvc.Name, selectedNode: 
pvc.Annotations[cache.SelectedNodeAnnotation], status: string(pvc.Status.Phase), size: pvc.Spec.Resources.Requests.Storage().String()}) - } - } - - for lvgName, pvcs := range result { - reserved, err := s.cache.GetLVGThickReservedSpace(lvgName) + reserved, err := s.cache.GetLVGThickReservedSpace(lvg.Name) if err != nil { w.WriteHeader(http.StatusInternalServerError) _, err = w.Write([]byte("unable to write the cache")) @@ -128,7 +97,7 @@ func (s *scheduler) getCache(w http.ResponseWriter, r *http.Request) { } } - _, err = w.Write([]byte(fmt.Sprintf("LVMVolumeGroup: %s Reserved: %s\n", lvgName, resource.NewQuantity(reserved, resource.BinarySI)))) + _, err = w.Write([]byte(fmt.Sprintf("LVMVolumeGroup: %s Thick Reserved: %s\n", lvg.Name, resource.NewQuantity(reserved, resource.BinarySI).String()))) if err != nil { w.WriteHeader(http.StatusInternalServerError) _, err = w.Write([]byte("unable to write the cache")) @@ -137,32 +106,45 @@ func (s *scheduler) getCache(w http.ResponseWriter, r *http.Request) { } } - for _, pvc := range pvcs { - _, err = w.Write([]byte(fmt.Sprintf("\tPVC: %s\n", pvc.pvcName))) + thickPvcs, err := s.cache.GetAllThickPVCLVG(lvg.Name) + for _, pvc := range thickPvcs { + _, err = w.Write([]byte(fmt.Sprintf("\t\tThick PVC: %s, reserved: %s, selected node: %s\n", pvc.Name, pvc.Spec.Resources.Requests.Storage().String(), pvc.Annotations[cache.SelectedNodeAnnotation]))) if err != nil { w.WriteHeader(http.StatusInternalServerError) s.log.Error(err, "error write response") } - _, err = w.Write([]byte(fmt.Sprintf("\t\tNodeName: %s\n", pvc.selectedNode))) + } + + for _, tp := range lvg.Status.ThinPools { + thinReserved, err := s.cache.GetLVGThinReservedSpace(lvg.Name, tp.Name) if err != nil { w.WriteHeader(http.StatusInternalServerError) s.log.Error(err, "error write response") } - _, err = w.Write([]byte(fmt.Sprintf("\t\tStatus: %s\n", pvc.status))) + _, err = w.Write([]byte(fmt.Sprintf("\tThinPool: %s, reserved: %s\n", tp.Name, resource.NewQuantity(thinReserved, resource.BinarySI).String()))) if err != nil { w.WriteHeader(http.StatusInternalServerError) s.log.Error(err, "error write response") } - _, err = w.Write([]byte(fmt.Sprintf("\t\tSize: %s\n", pvc.size))) + + thinPvcs, err := s.cache.GetAllPVCFromLVGThinPool(lvg.Name, tp.Name) if err != nil { w.WriteHeader(http.StatusInternalServerError) s.log.Error(err, "error write response") } + + for _, pvc := range thinPvcs { + _, err = w.Write([]byte(fmt.Sprintf("\t\tThin PVC: %s, reserved: %s, selected node:%s\n", pvc.Name, pvc.Spec.Resources.Requests.Storage().String(), pvc.Annotations[cache.SelectedNodeAnnotation]))) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + s.log.Error(err, "error write response") + } + } } } } -func (s *scheduler) getCacheStat(w http.ResponseWriter, r *http.Request) { +func (s *scheduler) getCacheStat(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) pvcTotalCount := 0
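The rewritten getCache handler now reports reservations per LVMVolumeGroup: Thick reserved space with its PVCs first, then each thin pool with its own reserved space and PVC list. The same traversal, condensed into one hypothetical helper (dumpReservations and the import paths are assumptions, not part of this patch; errors are dropped for brevity), can serve as a mental model of the new cache API:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"

	"sds-local-volume-scheduler-extender/pkg/cache"  // import paths assumed
	"sds-local-volume-scheduler-extender/pkg/logger"
)

// dumpReservations walks the scheduler cache the same way the new getCache
// handler does: per-LVG Thick reservation and Thick PVCs, then each thin pool
// with its own reservation and PVCs. Sketch only; errors are ignored.
func dumpReservations(c *cache.Cache, log logger.Logger) {
	for _, lvg := range c.GetAllLVG() {
		thickReserved, _ := c.GetLVGThickReservedSpace(lvg.Name)
		log.Debug(fmt.Sprintf("LVG %s: Thick reserved %s", lvg.Name, resource.NewQuantity(thickReserved, resource.BinarySI).String()))

		thickPVCs, _ := c.GetAllThickPVCLVG(lvg.Name)
		for _, pvc := range thickPVCs {
			log.Debug(fmt.Sprintf("  Thick PVC %s/%s: %s", pvc.Namespace, pvc.Name, pvc.Spec.Resources.Requests.Storage().String()))
		}

		for _, tp := range lvg.Status.ThinPools {
			thinReserved, _ := c.GetLVGThinReservedSpace(lvg.Name, tp.Name)
			log.Debug(fmt.Sprintf("  Thin pool %s: reserved %s", tp.Name, resource.NewQuantity(thinReserved, resource.BinarySI).String()))

			thinPVCs, _ := c.GetAllPVCFromLVGThinPool(lvg.Name, tp.Name)
			for _, pvc := range thinPVCs {
				log.Debug(fmt.Sprintf("    Thin PVC %s/%s: %s", pvc.Namespace, pvc.Name, pvc.Spec.Resources.Requests.Storage().String()))
			}
		}
	}
}

Note that Thick reservations hang directly off the LVMVolumeGroup entry while Thin reservations are tracked per thin pool, matching the thickPVCs/thinPools split inside lvgCache shown earlier in this diff.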