[controller] Add thin pool reserved space to the cache
Signed-off-by: Viktor Kramarenko <viktor.kramarenko@flant.com>
ViktorKram committed Jul 1, 2024
1 parent 7e8a4a2 commit a019630
Showing 8 changed files with 127 additions and 89 deletions.
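
For orientation: the diff renames the thin-pool spec/status types (SpecThinPool → LvmVolumeGroupThinPoolSpec, StatusThinPool → LvmVolumeGroupThinPoolStatus) and teaches the scheduler-extender cache to reserve space per thin pool rather than only per volume group. The cache shape being extended looks roughly like the sketch below; it is inferred from how the code in this diff loads and stores entries, so the field layout and the any placeholders are assumptions, not the repository's literal pkg/cache definitions.

package cache

import "sync"

// Sketch of the scheduler-extender cache structures referenced in the diff
// below; details may differ from the real definitions.
type lvgCache struct {
	lvg       any      // *v1alpha1.LvmVolumeGroup snapshot
	thickPVCs sync.Map // pvcKey ("namespace/name") -> *pvcCache
	thinPools sync.Map // thin pool name -> *thinPoolCache (per-pool reservations, the point of this commit)
}

type thinPoolCache struct {
	pvcs sync.Map // pvcKey -> *pvcCache
}

type pvcCache struct {
	pvc any // *v1.PersistentVolumeClaim awaiting space reservation
}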
52 changes: 32 additions & 20 deletions images/sds-local-volume-controller/api/v1alpha1/lvm_volume_group.go
@@ -1,9 +1,12 @@
/*
Copyright 2024 Flant JSC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -33,16 +36,24 @@ type LvmVolumeGroup struct {
Status LvmVolumeGroupStatus `json:"status,omitempty"`
}

type SpecThinPool struct {
Name string `json:"name"`
Size resource.Quantity `json:"size"`
type LvmVolumeGroupSpec struct {
ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"`
BlockDeviceNames []string `json:"blockDeviceNames"`
ThinPools []LvmVolumeGroupThinPoolSpec `json:"thinPools"`
Type string `json:"type"`
}

type LvmVolumeGroupSpec struct {
ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"`
BlockDeviceNames []string `json:"blockDeviceNames"`
ThinPools []SpecThinPool `json:"thinPools"`
Type string `json:"type"`
type LvmVolumeGroupStatus struct {
AllocatedSize resource.Quantity `json:"allocatedSize"`
Nodes []LvmVolumeGroupNode `json:"nodes"`
ThinPools []LvmVolumeGroupThinPoolStatus `json:"thinPools"`
VGSize resource.Quantity `json:"vgSize"`
VGUuid string `json:"vgUUID"`
Phase string `json:"phase"`
Conditions []metav1.Condition `json:"conditions"`
ThinPoolReady string `json:"thinPoolReady"`
ConfigurationApplied string `json:"configurationApplied"`
VGFree resource.Quantity `json:"vgFree"`
}

type LvmVolumeGroupDevice struct {
@@ -58,18 +69,19 @@ type LvmVolumeGroupNode struct {
Name string `json:"name"`
}

type StatusThinPool struct {
Name string `json:"name"`
ActualSize resource.Quantity `json:"actualSize"`
UsedSize resource.Quantity `json:"usedSize"`
type LvmVolumeGroupThinPoolStatus struct {
Name string `json:"name"`
ActualSize resource.Quantity `json:"actualSize"`
UsedSize resource.Quantity `json:"usedSize"`
AllocatedSize resource.Quantity `json:"allocatedSize"`
AvailableSpace resource.Quantity `json:"availableSpace"`
AllocationLimit string `json:"allocationLimit"`
Ready bool `json:"ready"`
Message string `json:"message"`
}

type LvmVolumeGroupStatus struct {
AllocatedSize resource.Quantity `json:"allocatedSize"`
Health string `json:"health"`
Message string `json:"message"`
Nodes []LvmVolumeGroupNode `json:"nodes"`
ThinPools []StatusThinPool `json:"thinPools"`
VGSize resource.Quantity `json:"vgSize"`
VGUuid string `json:"vgUUID"`
type LvmVolumeGroupThinPoolSpec struct {
Name string `json:"name"`
Size resource.Quantity `json:"size"`
AllocationLimit string `json:"allocationLimit"`
}
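
The new AllocationLimit field above is a percentage-style over-provisioning cap for a thin pool, and AvailableSpace in the status reflects how much of that cap is still unclaimed. The self-contained sketch below illustrates the arithmetic this implies; the exact formula and the "150%" string format are assumptions for illustration, not taken from the controller.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
)

// thinPoolFree estimates how much more can be allocated in a thin pool whose
// over-provisioning cap is given as a percentage string such as "150%".
// Hypothetical helper: it only illustrates the relationship between
// ActualSize, AllocatedSize, AllocationLimit and AvailableSpace above.
func thinPoolFree(actualSize, allocated resource.Quantity, allocationLimit string) (resource.Quantity, error) {
	percent, err := strconv.ParseFloat(strings.TrimSuffix(allocationLimit, "%"), 64)
	if err != nil {
		return resource.Quantity{}, err
	}
	limit := int64(float64(actualSize.Value()) * percent / 100)
	free := limit - allocated.Value()
	if free < 0 {
		free = 0
	}
	return *resource.NewQuantity(free, resource.BinarySI), nil
}

func main() {
	free, _ := thinPoolFree(resource.MustParse("10Gi"), resource.MustParse("4Gi"), "150%")
	fmt.Println(free.String()) // 11Gi
}

Running it prints 11Gi: a 10Gi pool with a 150% cap may allocate 15Gi, of which 4Gi is already taken.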
49 changes: 29 additions & 20 deletions images/sds-local-volume-csi/api/v1alpha1/lvm_volume_group.go
@@ -36,16 +36,24 @@ type LvmVolumeGroup struct {
Status LvmVolumeGroupStatus `json:"status,omitempty"`
}

type SpecThinPool struct {
Name string `json:"name"`
Size resource.Quantity `json:"size"`
type LvmVolumeGroupSpec struct {
ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"`
BlockDeviceNames []string `json:"blockDeviceNames"`
ThinPools []LvmVolumeGroupThinPoolSpec `json:"thinPools"`
Type string `json:"type"`
}

type LvmVolumeGroupSpec struct {
ActualVGNameOnTheNode string `json:"actualVGNameOnTheNode"`
BlockDeviceNames []string `json:"blockDeviceNames"`
ThinPools []SpecThinPool `json:"thinPools"`
Type string `json:"type"`
type LvmVolumeGroupStatus struct {
AllocatedSize resource.Quantity `json:"allocatedSize"`
Nodes []LvmVolumeGroupNode `json:"nodes"`
ThinPools []LvmVolumeGroupThinPoolStatus `json:"thinPools"`
VGSize resource.Quantity `json:"vgSize"`
VGUuid string `json:"vgUUID"`
Phase string `json:"phase"`
Conditions []metav1.Condition `json:"conditions"`
ThinPoolReady string `json:"thinPoolReady"`
ConfigurationApplied string `json:"configurationApplied"`
VGFree resource.Quantity `json:"vgFree"`
}

type LvmVolumeGroupDevice struct {
@@ -61,18 +69,19 @@ type LvmVolumeGroupNode struct {
Name string `json:"name"`
}

type StatusThinPool struct {
Name string `json:"name"`
ActualSize resource.Quantity `json:"actualSize"`
UsedSize resource.Quantity `json:"usedSize"`
type LvmVolumeGroupThinPoolStatus struct {
Name string `json:"name"`
ActualSize resource.Quantity `json:"actualSize"`
UsedSize resource.Quantity `json:"usedSize"`
AllocatedSize resource.Quantity `json:"allocatedSize"`
AvailableSpace resource.Quantity `json:"availableSpace"`
AllocationLimit string `json:"allocationLimit"`
Ready bool `json:"ready"`
Message string `json:"message"`
}

type LvmVolumeGroupStatus struct {
AllocatedSize resource.Quantity `json:"allocatedSize"`
Health string `json:"health"`
Message string `json:"message"`
Nodes []LvmVolumeGroupNode `json:"nodes"`
ThinPools []StatusThinPool `json:"thinPools"`
VGSize resource.Quantity `json:"vgSize"`
VGUuid string `json:"vgUUID"`
type LvmVolumeGroupThinPoolSpec struct {
Name string `json:"name"`
Size resource.Quantity `json:"size"`
AllocationLimit string `json:"allocationLimit"`
}
2 changes: 1 addition & 1 deletion images/sds-local-volume-csi/pkg/utils/func.go
@@ -273,7 +273,7 @@ func GetLVMThinPoolFreeSpace(lvg v1alpha1.LvmVolumeGroup, thinPoolName string) (thinPoolFreeSpace resour
}

func GetLVMThinPoolFreeSpace(lvg v1alpha1.LvmVolumeGroup, thinPoolName string) (thinPoolFreeSpace resource.Quantity, err error) {
var storagePoolThinPool *v1alpha1.StatusThinPool
var storagePoolThinPool *v1alpha1.LvmVolumeGroupThinPoolStatus
for _, thinPool := range lvg.Status.ThinPools {
if thinPool.Name == thinPoolName {
storagePoolThinPool = &thinPool
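
With the rename, GetLVMThinPoolFreeSpace now resolves the pool from the LvmVolumeGroupThinPoolStatus entries. A hedged caller-side fragment follows; the surrounding provisioning flow and the variable names lvg and requested are illustrative, only the helper's signature comes from the diff above.

// Illustrative fragment: lvg and requested are assumed to come from the
// CSI provisioning path that already fetched the LvmVolumeGroup resource.
free, err := utils.GetLVMThinPoolFreeSpace(lvg, "thin-pool-0")
if err != nil {
	return err
}
if free.Cmp(requested) < 0 {
	return fmt.Errorf("thin pool thin-pool-0: only %s free, %s requested", free.String(), requested.String())
}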
37 changes: 17 additions & 20 deletions images/sds-local-volume-scheduler-extender/pkg/cache/cache.go
@@ -74,8 +74,8 @@ func (c *Cache) AddLVG(lvg *v1alpha1.LvmVolumeGroup) {

// UpdateLVG updated selected LVMVolumeGroup resource in the cache. If such LVMVolumeGroup is not stored, returns an error.
func (c *Cache) UpdateLVG(lvg *v1alpha1.LvmVolumeGroup) error {
if cache, found := c.lvgs.Load(lvg.Name); found {
cache.(*lvgCache).lvg = lvg
if lvgCh, found := c.lvgs.Load(lvg.Name); found {
lvgCh.(*lvgCache).lvg = lvg

c.log.Trace(fmt.Sprintf("[UpdateLVG] the LVMVolumeGroup %s nodes: %v", lvg.Name, lvg.Status.Nodes))
for _, node := range lvg.Status.Nodes {
@@ -96,7 +96,7 @@ func (c *Cache) UpdateLVG(lvg *v1alpha1.LvmVolumeGroup) error {
return nil
}

return fmt.Errorf("the LVMVolumeGroup %s was not found in the cache", lvg.Name)
return fmt.Errorf("the LVMVolumeGroup %s was not found in the lvgCh", lvg.Name)
}

// TryGetLVG returns selected LVMVolumeGroup resource if it is stored in the cache, otherwise returns nil.
@@ -279,12 +279,14 @@ func (c *Cache) shouldAddPVC(pvc *v1.PersistentVolumeClaim, lvgCh *lvgCache, pvc

c.log.Debug(fmt.Sprintf("[shouldAddPVC] LVMVolumeGroup %s belongs to PVC %s/%s selected node %s", lvgName, pvc.Namespace, pvc.Name, pvc.Annotations[SelectedNodeAnnotation]))

// if pvc is thick
_, found = lvgCh.thickPVCs.Load(pvcKey)
if found {
c.log.Debug(fmt.Sprintf("[shouldAddPVC] PVC %s was found in the cache of the LVMVolumeGroup %s", pvcKey, lvgName))
return false, nil
}

// if pvc is thin
if thinPoolName != "" {
thinPoolCh, found := lvgCh.thinPools.Load(thinPoolName)
if !found {
@@ -324,7 +326,7 @@ func (c *Cache) AddThinPVC(lvgName, thinPoolName string, pvc *v1.PersistentVolum
return err
}

// this case might be triggered if the extender recovers after fail and finds some pending thickPVCs with selected nodes
// this case might be triggered if the extender recovers after fail and finds some pending thin PVCs with selected nodes
c.log.Trace(fmt.Sprintf("[AddThinPVC] PVC %s/%s annotations: %v", pvc.Namespace, pvc.Name, pvc.Annotations))
shouldAdd, err := c.shouldAddPVC(pvc, lvgCh.(*lvgCache), pvcKey, lvgName, thinPoolName)
if err != nil {
@@ -366,13 +368,7 @@ func (c *Cache) addNewThickPVC(lvgCh *lvgCache, pvc *v1.PersistentVolumeClaim) {
func (c *Cache) addNewThinPVC(lvgCh *lvgCache, pvc *v1.PersistentVolumeClaim, thinPoolName string) error {
pvcKey := configurePVCKey(pvc)

if thinPoolName == "" {
err := errors.New("no thin pool specified")
c.log.Error(err, fmt.Sprintf("[addNewThinPVC] unable to add Thin PVC %s to the cache", pvcKey))
return err
}

err := c.addNewThinPool(lvgCh, pvc, thinPoolName)
err := c.addThinPoolIfNotExists(lvgCh, thinPoolName)
if err != nil {
c.log.Error(err, fmt.Sprintf("[addNewThinPVC] unable to add Thin pool %s in the LVMVolumeGroup %s cache for PVC %s", thinPoolName, lvgCh.lvg.Name, pvc.Name))
return err
@@ -431,7 +427,7 @@ func (c *Cache) UpdateThickPVC(lvgName string, pvc *v1.PersistentVolumeClaim) er
return nil
}

func (c *Cache) UpdateThinPVC(lvgName string, pvc *v1.PersistentVolumeClaim, thinPoolName string) error {
func (c *Cache) UpdateThinPVC(lvgName, thinPoolName string, pvc *v1.PersistentVolumeClaim) error {
pvcKey := configurePVCKey(pvc)

lvgCh, found := c.lvgs.Load(lvgName)
@@ -442,7 +438,7 @@ func (c *Cache) UpdateThinPVC(lvgName string, pvc *v1.PersistentVolumeClaim, thi
thinPoolCh, found := lvgCh.(*lvgCache).thinPools.Load(thinPoolName)
if !found {
c.log.Debug(fmt.Sprintf("[UpdateThinPVC] Thin Pool %s was not found in the LVMVolumeGroup %s, add it.", thinPoolName, lvgName))
err := c.addNewThinPool(lvgCh.(*lvgCache), pvc, thinPoolName)
err := c.addThinPoolIfNotExists(lvgCh.(*lvgCache), thinPoolName)
if err != nil {
return err
}
@@ -467,20 +463,17 @@ func (c *Cache) UpdateThinPVC(lvgName string, pvc *v1.PersistentVolumeClaim, thi
return nil
}

func (c *Cache) addNewThinPool(lvgCh *lvgCache, pvc *v1.PersistentVolumeClaim, thinPoolName string) error {
pvcKey := configurePVCKey(pvc)

func (c *Cache) addThinPoolIfNotExists(lvgCh *lvgCache, thinPoolName string) error {
if len(thinPoolName) == 0 {
err := errors.New("no thin pool name specified")
c.log.Error(err, fmt.Sprintf("[addNewThinPool] unable to add thin pool for PVC %s in the LVMVolumeGroup %s", pvc.Name, lvgCh.lvg.Name))
c.log.Error(err, fmt.Sprintf("[addThinPoolIfNotExists] unable to add thin pool in the LVMVolumeGroup %s", lvgCh.lvg.Name))
return err
}

_, found := lvgCh.thinPools.Load(thinPoolName)
if found {
err := fmt.Errorf("thin pool %s is already created", thinPoolName)
c.log.Error(err, fmt.Sprintf("[addNewThinPool] unable to add new Thin pool %s to the LVMVolumeGroup %s for PVC %s", thinPoolName, lvgCh.lvg.Name, pvcKey))
return err
c.log.Debug(fmt.Sprintf("[addThinPoolIfNotExists] Thin pool %s is already created in the LVMVolumeGroup %s. No need to add a new one", thinPoolName, lvgCh.lvg.Name))
return nil
}

lvgCh.thinPools.Store(thinPoolName, &thinPoolCache{})
@@ -496,6 +489,7 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er
return nil, err
}

// TODO: fix this to struct size field after refactoring
size := 0
lvgCh.(*lvgCache).thickPVCs.Range(func(key, value any) bool {
size++
Expand All @@ -510,11 +504,13 @@ func (c *Cache) GetAllPVCForLVG(lvgName string) ([]*v1.PersistentVolumeClaim, er
})

result := make([]*v1.PersistentVolumeClaim, 0, size)
// collect Thick PVC for the LVG
lvgCh.(*lvgCache).thickPVCs.Range(func(pvcName, pvcCh any) bool {
result = append(result, pvcCh.(*pvcCache).pvc)
return true
})

// collect Thin PVC for the LVG
lvgCh.(*lvgCache).thinPools.Range(func(tpName, tpCh any) bool {
tpCh.(*thinPoolCache).pvcs.Range(func(pvcName, pvcCh any) bool {
result = append(result, pvcCh.(*pvcCache).pvc)
@@ -637,6 +633,7 @@ func (c *Cache) CheckIsPVCStored(pvc *v1.PersistentVolumeClaim) bool {
// RemoveSpaceReservationForPVCWithSelectedNode removes space reservation for selected PVC for every LVMVolumeGroup resource, which is not bound to the PVC selected node.
func (c *Cache) RemoveSpaceReservationForPVCWithSelectedNode(pvc *v1.PersistentVolumeClaim, deviceType string) error {
pvcKey := configurePVCKey(pvc)
// the LVG which is used to store PVC
selectedLVGName := ""

lvgNamesForPVC, found := c.pvcLVGs.Load(pvcKey)
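
Taken together, the cache changes above give the extender a thin-aware reservation path: AddThinPVC registers the pool on demand via addThinPoolIfNotExists (now idempotent instead of erroring on a duplicate), and UpdateThinPVC takes the thin pool name explicitly. A rough usage sketch of that flow is shown below; the wrapper function and its error handling are illustrative, only the method signatures come from the diff.

// Hypothetical helper wiring the cache calls shown above together.
func reserveThinSpace(c *cache.Cache, lvgName, thinPoolName string, pvc *v1.PersistentVolumeClaim) error {
	// First sighting of the PVC: creates the thin pool entry in the LVG cache
	// if needed and stores the PVC reservation under it.
	if err := c.AddThinPVC(lvgName, thinPoolName, pvc); err != nil {
		return err
	}
	// Subsequent reconciles (e.g. once the PVC gets a selected-node annotation)
	// refresh the same reservation through the reordered signature.
	return c.UpdateThinPVC(lvgName, thinPoolName, pvc)
}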
@@ -562,11 +562,19 @@ func BenchmarkCache_FullLoad(b *testing.B) {
},
}

for _, pvc := range pvcs {
for d, pvc := range pvcs {
for err != nil {
err = cache.UpdateThickPVC(lvg.Name, pvc)
}

for err != nil {
err = cache.AddThinPVC(lvg.Name, fmt.Sprintf("test-thin-%d", d), pvc)
}

for err != nil {
err = cache.UpdateThinPVC(lvg.Name, fmt.Sprintf("test-thin-%d", d), pvc)
}

cache.GetLVGNamesForPVC(pvc)
}
}
@@ -581,6 +589,10 @@ func BenchmarkCache_FullLoad(b *testing.B) {
if err != nil {
b.Error(err)
}
_, err = cache.GetLVGThinReservedSpace(lvgName, "test-thin")
if err != nil {
b.Error(err)
}
}

cache.GetLVGNamesByNodeName(nodeName)
@@ -118,12 +118,6 @@ func RunPVCWatcherCacheController(
}

func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, schedulerCache *cache.Cache, pvc *v1.PersistentVolumeClaim, selectedNodeName string) {
log.Debug(fmt.Sprintf("[reconcilePVC] starts to find common LVMVolumeGroup for the selected node %s and PVC %s/%s", selectedNodeName, pvc.Namespace, pvc.Name))
lvgsOnTheNode := schedulerCache.GetLVGNamesByNodeName(selectedNodeName)
for _, lvgName := range lvgsOnTheNode {
log.Trace(fmt.Sprintf("[reconcilePVC] LVMVolumeGroup %s belongs to the node %s", lvgName, selectedNodeName))
}

sc := &v12.StorageClass{}
err := mgr.GetClient().Get(ctx, client.ObjectKey{
Name: *pvc.Spec.StorageClassName,
@@ -138,10 +132,12 @@ func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, s
return
}

log.Debug(fmt.Sprintf("[reconcilePVC] tries to extract LVGs from the Storage Class %s for PVC %s/%s", sc.Name, pvc.Namespace, pvc.Name))
lvgsFromSc, err := scheduler.ExtractLVGsFromSC(sc)
if err != nil {
log.Error(err, fmt.Sprintf("[reconcilePVC] unable to extract LVMVolumeGroups from the Storage Class %s", sc.Name))
}
log.Debug(fmt.Sprintf("[reconcilePVC] successfully extracted LVGs from the Storage Class %s for PVC %s/%s", sc.Name, pvc.Namespace, pvc.Name))

lvgsForPVC := schedulerCache.GetLVGNamesForPVC(pvc)
if lvgsForPVC == nil || len(lvgsForPVC) == 0 {
@@ -155,6 +151,12 @@ func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, s
log.Trace(fmt.Sprintf("[reconcilePVC] LVMVolumeGroup %s belongs to PVC %s/%s", lvgName, pvc.Namespace, pvc.Name))
}

log.Debug(fmt.Sprintf("[reconcilePVC] starts to find common LVMVolumeGroup for the selected node %s and PVC %s/%s", selectedNodeName, pvc.Namespace, pvc.Name))
lvgsOnTheNode := schedulerCache.GetLVGNamesByNodeName(selectedNodeName)
for _, lvgName := range lvgsOnTheNode {
log.Trace(fmt.Sprintf("[reconcilePVC] LVMVolumeGroup %s belongs to the node %s", lvgName, selectedNodeName))
}

var commonLVGName string
for _, pvcLvg := range lvgsForPVC {
if slices.Contains(lvgsOnTheNode, pvcLvg) {
@@ -181,7 +183,7 @@ func reconcilePVC(ctx context.Context, mgr manager.Manager, log logger.Logger, s
case consts.Thin:
for _, lvg := range lvgsFromSc {
if lvg.Name == commonLVGName {
err = schedulerCache.UpdateThinPVC(commonLVGName, pvc, lvg.Thin.PoolName)
err = schedulerCache.UpdateThinPVC(commonLVGName, lvg.Thin.PoolName, pvc)
if err != nil {
log.Error(err, fmt.Sprintf("[reconcilePVC] unable to update Thin PVC %s/%s in the cache", pvc.Namespace, pvc.Name))
return