Commit c9aac9c

[controller] Change LVMVolumeGroup size field type from string to quantity
Signed-off-by: Viktor Kramarenko <viktor.kramarenko@flant.com>
ViktorKram committed Apr 22, 2024
1 parent 0c3955b commit c9aac9c
Showing 8 changed files with 62 additions and 108 deletions.
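The motivation for the type change, as a minimal sketch (statusBefore and statusAfter below are stand-ins, not the real API types): before, every consumer of a size field had to call resource.ParseQuantity and carry an error path; after, the value is decoded once into a resource.Quantity and every use is infallible.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

// statusBefore and statusAfter are stand-ins for the real status types,
// not part of the module.
type statusBefore struct{ VGSize string }
type statusAfter struct{ VGSize resource.Quantity }

func main() {
    // Before: every consumer parsed the string and handled a parse error.
    b := statusBefore{VGSize: "10Gi"}
    vgSize, err := resource.ParseQuantity(b.VGSize)
    if err != nil {
        panic(err)
    }
    fmt.Println(vgSize.Value()) // 10737418240

    // After: the value is decoded once into a Quantity; uses are infallible.
    a := statusAfter{VGSize: resource.MustParse("10Gi")}
    fmt.Println(a.VGSize.Value()) // 10737418240
}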
@@ -48,7 +48,7 @@ type LvmVolumeGroupSpec struct {
 type LvmVolumeGroupDevice struct {
     BlockDevice string `json:"blockDevice"`
     DevSize resource.Quantity `json:"devSize"`
-    PVSize string `json:"pvSize"`
+    PVSize resource.Quantity `json:"pvSize"`
     PVUuid string `json:"pvUUID"`
     Path string `json:"path"`
 }
@@ -61,15 +61,15 @@ type LvmVolumeGroupNode struct {
 type StatusThinPool struct {
     Name string `json:"name"`
     ActualSize resource.Quantity `json:"actualSize"`
-    UsedSize string `json:"usedSize"`
+    UsedSize resource.Quantity `json:"usedSize"`
 }
 
 type LvmVolumeGroupStatus struct {
-    AllocatedSize string `json:"allocatedSize"`
+    AllocatedSize resource.Quantity `json:"allocatedSize"`
     Health string `json:"health"`
     Message string `json:"message"`
     Nodes []LvmVolumeGroupNode `json:"nodes"`
     ThinPools []StatusThinPool `json:"thinPools"`
-    VGSize string `json:"vgSize"`
+    VGSize resource.Quantity `json:"vgSize"`
     VGUuid string `json:"vgUUID"`
 }
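Since resource.Quantity implements json.Marshaler and json.Unmarshaler, existing objects that carry quantity strings such as "931Gi" keep decoding after this change; a malformed string now fails once, at decode time, instead of at every call site. A minimal sketch (device is a stand-in for LvmVolumeGroupDevice, keeping only the field relevant here):

package main

import (
    "encoding/json"
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

// device is a stand-in for LvmVolumeGroupDevice.
type device struct {
    PVSize resource.Quantity `json:"pvSize"`
}

func main() {
    // Quantity implements json.Unmarshaler, so pvSize carried as a quantity
    // string decodes directly into the new field type.
    var d device
    if err := json.Unmarshal([]byte(`{"pvSize":"931Gi"}`), &d); err != nil {
        panic(err)
    }
    fmt.Println(d.PVSize.Value()) // 999653638144
}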
8 changes: 4 additions & 4 deletions images/sds-local-volume-csi/api/v1alpha1/lvm_volume_group.go
@@ -51,7 +51,7 @@ type LvmVolumeGroupSpec struct {
 type LvmVolumeGroupDevice struct {
     BlockDevice string `json:"blockDevice"`
     DevSize resource.Quantity `json:"devSize"`
-    PVSize string `json:"pvSize"`
+    PVSize resource.Quantity `json:"pvSize"`
     PVUuid string `json:"pvUUID"`
     Path string `json:"path"`
 }
@@ -64,15 +64,15 @@ type LvmVolumeGroupNode struct {
 type StatusThinPool struct {
     Name string `json:"name"`
     ActualSize resource.Quantity `json:"actualSize"`
-    UsedSize string `json:"usedSize"`
+    UsedSize resource.Quantity `json:"usedSize"`
 }
 
 type LvmVolumeGroupStatus struct {
-    AllocatedSize string `json:"allocatedSize"`
+    AllocatedSize resource.Quantity `json:"allocatedSize"`
     Health string `json:"health"`
     Message string `json:"message"`
     Nodes []LvmVolumeGroupNode `json:"nodes"`
     ThinPools []StatusThinPool `json:"thinPools"`
-    VGSize string `json:"vgSize"`
+    VGSize resource.Quantity `json:"vgSize"`
     VGUuid string `json:"vgUUID"`
 }
17 changes: 7 additions & 10 deletions images/sds-local-volume-csi/driver/controller.go
@@ -66,12 +66,12 @@ func (d *Driver) CreateVolume(ctx context.Context, request *csi.CreateVolumeRequ
         d.log.Error(err, "error GetStorageClassLVGs")
         return nil, status.Errorf(codes.Internal, err.Error())
     }
-    // TODO: Consider refactoring the naming strategy for llvName and lvName.
-    // Currently, we use the same name for llvName (the name of the LVMLogicalVolume resource in Kubernetes)
-    // and lvName (the name of the LV in LVM on the node) because the PV name is unique within the cluster,
-    // preventing name collisions. This approach simplifies matching between nodes and Kubernetes by maintaining
-    // the same name in both contexts. Future consideration should be given to optimizing this logic to enhance
-    // code readability and maintainability.
+    // TODO: Consider refactoring the naming strategy for llvName and lvName.
+    // Currently, we use the same name for llvName (the name of the LVMLogicalVolume resource in Kubernetes)
+    // and lvName (the name of the LV in LVM on the node) because the PV name is unique within the cluster,
+    // preventing name collisions. This approach simplifies matching between nodes and Kubernetes by maintaining
+    // the same name in both contexts. Future consideration should be given to optimizing this logic to enhance
+    // code readability and maintainability.
     llvName := request.Name
     lvName := request.Name
     d.log.Info(fmt.Sprintf("llv name: %s ", llvName))
@@ -302,10 +302,7 @@ func (d *Driver) ControllerExpandVolume(ctx context.Context, request *csi.Contro
     }
 
     if llv.Spec.Type == internal.LLMTypeThick {
-        lvgFreeSpace, err := utils.GetLVMVolumeGroupFreeSpace(*lvg)
-        if err != nil {
-            return nil, status.Errorf(codes.Internal, "error getting LVMVolumeGroupCapacity: %v", err)
-        }
+        lvgFreeSpace := utils.GetLVMVolumeGroupFreeSpace(*lvg)
 
         if lvgFreeSpace.Value() < (requestCapacity.Value() - llv.Status.ActualSize.Value()) {
             return nil, status.Errorf(codes.Internal, "requested size: %s is greater than the capacity of the LVMVolumeGroup: %s", requestCapacity.String(), lvgFreeSpace.String())
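With the helper now infallible, the expand path reduces to a value comparison. A hedged sketch of the Thick branch above, with checkThickExpand as an illustrative stand-alone helper (not part of the driver, which reads these values from the LVMLogicalVolume and LVMVolumeGroup resources):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

// checkThickExpand is an illustrative stand-alone version of the Thick
// branch in ControllerExpandVolume above.
func checkThickExpand(lvgFreeSpace, requestCapacity, actualSize resource.Quantity) error {
    // Only the growth delta has to fit: the LV already occupies actualSize.
    if lvgFreeSpace.Value() < requestCapacity.Value()-actualSize.Value() {
        return fmt.Errorf("requested size: %s is greater than the capacity of the LVMVolumeGroup: %s",
            requestCapacity.String(), lvgFreeSpace.String())
    }
    return nil
}

func main() {
    // 2Gi free; growing a 3Gi LV to 4Gi needs only 1Gi more, so this passes.
    err := checkThickExpand(resource.MustParse("2Gi"), resource.MustParse("4Gi"), resource.MustParse("3Gi"))
    fmt.Println(err) // <nil>
}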
30 changes: 6 additions & 24 deletions images/sds-local-volume-csi/pkg/utils/func.go
@@ -178,10 +178,7 @@ func GetNodeWithMaxFreeSpace(log *logger.Logger, lvgs []v1alpha1.LvmVolumeGroup,
 
     switch lvmType {
     case internal.LLMTypeThick:
-        freeSpace, err = GetLVMVolumeGroupFreeSpace(lvg)
-        if err != nil {
-            return "", freeSpace, fmt.Errorf("get free space for lvg %+v: %w", lvg, err)
-        }
+        freeSpace = GetLVMVolumeGroupFreeSpace(lvg)
     case internal.LLMTypeThin:
         thinPoolName, ok := storageClassLVGParametersMap[lvg.Name]
         if !ok {
@@ -269,20 +266,10 @@ func GetLVMVolumeGroup(ctx context.Context, kc client.Client, lvgName, namespace
     return nil, fmt.Errorf("after %d attempts of getting LvmVolumeGroup %s in namespace %s, last error: %w", KubernetesApiRequestLimit, lvgName, namespace, err)
 }
 
-func GetLVMVolumeGroupFreeSpace(lvg v1alpha1.LvmVolumeGroup) (vgFreeSpace resource.Quantity, err error) {
-    vgSize, err := resource.ParseQuantity(lvg.Status.VGSize)
-    if err != nil {
-        return vgFreeSpace, fmt.Errorf("parse size vgSize (%s): %w", lvg.Status.VGSize, err)
-    }
-
-    allocatedSize, err := resource.ParseQuantity(lvg.Status.AllocatedSize)
-    if err != nil {
-        return vgFreeSpace, fmt.Errorf("parse size vgSize (%s): %w", lvg.Status.AllocatedSize, err)
-    }
-
-    vgFreeSpace = vgSize
-    vgFreeSpace.Sub(allocatedSize)
-    return vgFreeSpace, nil
+func GetLVMVolumeGroupFreeSpace(lvg v1alpha1.LvmVolumeGroup) (vgFreeSpace resource.Quantity) {
+    vgFreeSpace = lvg.Status.VGSize
+    vgFreeSpace.Sub(lvg.Status.AllocatedSize)
+    return vgFreeSpace
 }
 
 func GetLVMThinPoolFreeSpace(lvg v1alpha1.LvmVolumeGroup, thinPoolName string) (thinPoolFreeSpace resource.Quantity, err error) {
@@ -297,15 +284,10 @@ func GetLVMThinPoolFreeSpace(lvg v1alpha1.LvmVolumeGroup, thinPoolName string) (
         return thinPoolFreeSpace, fmt.Errorf("[GetLVMThinPoolFreeSpace] thin pool %s not found in lvg %+v", thinPoolName, lvg)
     }
 
-    thinPoolUsedSize, err := resource.ParseQuantity(storagePoolThinPool.UsedSize)
-    if err != nil {
-        return thinPoolFreeSpace, fmt.Errorf("[GetLVMThinPoolFreeSpace] parse size thinPool.UsedSize (%s): %w", storagePoolThinPool.UsedSize, err)
-    }
-
     thinPoolActualSize := storagePoolThinPool.ActualSize
 
     thinPoolFreeSpace = thinPoolActualSize.DeepCopy()
-    thinPoolFreeSpace.Sub(thinPoolUsedSize)
+    thinPoolFreeSpace.Sub(storagePoolThinPool.UsedSize)
     return thinPoolFreeSpace, nil
 }
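One subtlety worth noting: GetLVMVolumeGroupFreeSpace copies the Quantity by plain assignment before calling Sub, while GetLVMThinPoolFreeSpace keeps DeepCopy. Assignment copies the struct, and Sub (a pointer-receiver method) mutates only the receiver on the int64 fast path; DeepCopy additionally clones the internal inf.Dec pointer a Quantity can carry for values outside that fast path, so it is the conservative choice. A small demonstration:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    vgSize := resource.MustParse("10Gi")
    allocated := resource.MustParse("4Gi")

    // Quantity is a plain struct, so assignment copies it; Sub mutates
    // only the receiver for int64-backed values like these.
    free := vgSize
    free.Sub(allocated)
    fmt.Println(free.String(), vgSize.String()) // 6Gi 10Gi

    // DeepCopy (kept in GetLVMThinPoolFreeSpace above) also clones the
    // internal inf.Dec pointer, which a shallow copy would share.
    free2 := vgSize.DeepCopy()
    free2.Sub(allocated)
    fmt.Println(free2.String()) // 6Gi
}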
@@ -48,7 +48,7 @@ type LvmVolumeGroupSpec struct {
 type LvmVolumeGroupDevice struct {
     BlockDevice string `json:"blockDevice"`
     DevSize resource.Quantity `json:"devSize"`
-    PVSize string `json:"pvSize"`
+    PVSize resource.Quantity `json:"pvSize"`
     PVUuid string `json:"pvUUID"`
     Path string `json:"path"`
 }
@@ -61,15 +61,15 @@ type LvmVolumeGroupNode struct {
 type StatusThinPool struct {
     Name string `json:"name"`
     ActualSize resource.Quantity `json:"actualSize"`
-    UsedSize string `json:"usedSize"`
+    UsedSize resource.Quantity `json:"usedSize"`
 }
 
 type LvmVolumeGroupStatus struct {
-    AllocatedSize string `json:"allocatedSize"`
+    AllocatedSize resource.Quantity `json:"allocatedSize"`
     Health string `json:"health"`
     Message string `json:"message"`
     Nodes []LvmVolumeGroupNode `json:"nodes"`
     ThinPools []StatusThinPool `json:"thinPools"`
-    VGSize string `json:"vgSize"`
+    VGSize resource.Quantity `json:"vgSize"`
     VGUuid string `json:"vgUUID"`
 }
@@ -5,7 +5,6 @@ import (
     "errors"
     "fmt"
     v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/client-go/util/workqueue"
     "sds-local-volume-scheduler-extender/api/v1alpha1"
     "sds-local-volume-scheduler-extender/pkg/cache"
@@ -107,23 +106,31 @@ func RunLVGWatcherCacheController(
             }
 
             log.Debug(fmt.Sprintf("[RunLVGWatcherCacheController] starts to calculate the size difference for LVMVolumeGroup %s", newLvg.Name))
-            oldSize, err := resource.ParseQuantity(oldLvg.Status.AllocatedSize)
-            if err != nil {
-                log.Error(err, fmt.Sprintf("[RunLVGWatcherCacheController] unable to parse the allocated size for the LVMVolumeGroup %s", oldLvg.Name))
-                return
-            }
-            log.Trace(fmt.Sprintf("[RunLVGWatcherCacheController] old state LVMVolumeGroup %s has size %s", oldLvg.Name, oldSize.String()))
-
-            newSize, err := resource.ParseQuantity(newLvg.Status.AllocatedSize)
-            if err != nil {
-                log.Error(err, fmt.Sprintf("[RunLVGWatcherCacheController] unable to parse the allocated size for the LVMVolumeGroup %s", oldLvg.Name))
-                return
-            }
-            log.Trace(fmt.Sprintf("[RunLVGWatcherCacheController] new state LVMVolumeGroup %s has size %s", newLvg.Name, newSize.String()))
+            log.Trace(fmt.Sprintf("[RunLVGWatcherCacheController] old state LVMVolumeGroup %s has size %s", oldLvg.Name, oldLvg.Status.AllocatedSize))
+
+            //var oldSize resource.Quantity
+            //if oldLvg.Status.AllocatedSize != "" {
+            //	oldSize, err = resource.ParseQuantity(oldLvg.Status.AllocatedSize)
+            //	if err != nil {
+            //		log.Error(err, fmt.Sprintf("[RunLVGWatcherCacheController] unable to parse the allocated size for the LVMVolumeGroup %s", oldLvg.Name))
+            //		return
+            //	}
+            //}
+            //
+            //log.Trace(fmt.Sprintf("[RunLVGWatcherCacheController] new state LVMVolumeGroup %s has size %s", newLvg.Name, newLvg.Status.AllocatedSize))
+            //if newLvg.Status.AllocatedSize == "" {
+            //	log.Warning(fmt.Sprintf("LVMVolumeGroup %s new state has uninitialized allocated size field. Reconciliation will be skipped", newLvg.Name))
+            //	return
+            //}
+            ////newSize, err := resource.ParseQuantity(newLvg.Status.AllocatedSize)
+            //if err != nil {
+            //	log.Error(err, fmt.Sprintf("[RunLVGWatcherCacheController] unable to parse the allocated size for the LVMVolumeGroup %s", oldLvg.Name))
+            //	return
+            //}
             log.Debug(fmt.Sprintf("[RunLVGWatcherCacheController] successfully calculated the size difference for LVMVolumeGroup %s", newLvg.Name))
 
             if newLvg.DeletionTimestamp != nil ||
-                oldSize.Value() == newSize.Value() {
+                oldLvg.Status.AllocatedSize.Value() == newLvg.Status.AllocatedSize.Value() {
                 log.Debug(fmt.Sprintf("[RunLVGWatcherCacheController] the LVMVolumeGroup %s should not be reconciled", newLvg.Name))
                 return
             }
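The update handler now boils down to a value comparison. A condensed, illustrative predicate (shouldReconcile is not part of the commit; the v1alpha1 import path is taken from the import hunk above):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"

    "sds-local-volume-scheduler-extender/api/v1alpha1"
)

// shouldReconcile condenses the update-handler check above; it is
// illustrative, not part of the commit.
func shouldReconcile(oldLvg, newLvg *v1alpha1.LvmVolumeGroup) bool {
    if newLvg.DeletionTimestamp != nil {
        return false
    }
    // AllocatedSize is a resource.Quantity now: compare values directly,
    // with no ParseQuantity call and no error path.
    return oldLvg.Status.AllocatedSize.Value() != newLvg.Status.AllocatedSize.Value()
}

func main() {
    oldLvg := &v1alpha1.LvmVolumeGroup{}
    newLvg := &v1alpha1.LvmVolumeGroup{}
    newLvg.Status.AllocatedSize = resource.MustParse("5Gi")
    fmt.Println(shouldReconcile(oldLvg, newLvg)) // true
}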
41 changes: 11 additions & 30 deletions images/sds-local-volume-scheduler-extender/pkg/scheduler/filter.go
@@ -292,10 +292,7 @@ func filterNodes(
     log.Trace(fmt.Sprintf("[filterNodes] the LVMVolumeGroup %s is actually used. VG size: %s, allocatedSize: %s", lvg.Name, lvg.Status.VGSize, lvg.Status.AllocatedSize))
     }
 
-    lvgsThickFree, err := getLVGThickFreeSpaces(log, usedLVGs)
-    if err != nil {
-        return nil, err
-    }
+    lvgsThickFree := getLVGThickFreeSpaces(log, usedLVGs)
     log.Trace(fmt.Sprintf("[filterNodes] for a Pod %s/%s current LVMVolumeGroups Thick FreeSpace on the node: %+v", pod.Namespace, pod.Name, lvgsThickFree))
 
     for lvgName, freeSpace := range lvgsThickFree {
@@ -440,21 +437,18 @@ func filterNodes(
     return result, nil
 }
 
-func getLVGThickFreeSpaces(log logger.Logger, lvgs map[string]*v1alpha1.LvmVolumeGroup) (map[string]int64, error) {
+func getLVGThickFreeSpaces(log logger.Logger, lvgs map[string]*v1alpha1.LvmVolumeGroup) map[string]int64 {
     result := make(map[string]int64, len(lvgs))
 
     for _, lvg := range lvgs {
         log.Debug(fmt.Sprintf("[getLVGThickFreeSpaces] tries to count free VG space for LVMVolumeGroup %s", lvg.Name))
-        free, err := getVGFreeSpace(lvg)
-        if err != nil {
-            return nil, err
-        }
+        free := getVGFreeSpace(lvg)
         log.Debug(fmt.Sprintf("[getLVGThickFreeSpaces] successfully counted free VG space for LVMVolumeGroup %s", lvg.Name))
 
         result[lvg.Name] = free.Value()
     }
 
-    return result, nil
+    return result
 }
 
 func findMatchedThinPool(thinPools []v1alpha1.StatusThinPool, name string) *v1alpha1.StatusThinPool {
@@ -584,30 +578,17 @@ func SortLVGsByNodeName(lvgs map[string]*v1alpha1.LvmVolumeGroup) map[string][]*
     return sorted
 }
 
-func getVGFreeSpace(lvg *v1alpha1.LvmVolumeGroup) (resource.Quantity, error) {
-    free, err := resource.ParseQuantity(lvg.Status.VGSize)
-    if err != nil {
-        return resource.Quantity{}, fmt.Errorf("unable to parse Status.VGSize quantity for LVMVolumeGroup %s, err: %w", lvg.Name, err)
-    }
-
-    used, err := resource.ParseQuantity(lvg.Status.AllocatedSize)
-    if err != nil {
-        return resource.Quantity{}, fmt.Errorf("unable to parse Status.AllocatedSize quantity for LVMVolumeGroup %s, err: %w", lvg.Name, err)
-    }
-
-    free.Sub(used)
-    return free, nil
+func getVGFreeSpace(lvg *v1alpha1.LvmVolumeGroup) resource.Quantity {
+    free := lvg.Status.VGSize
+    free.Sub(lvg.Status.AllocatedSize)
+    return free
 }
 
-func getThinPoolFreeSpace(tp *v1alpha1.StatusThinPool) (resource.Quantity, error) {
+func getThinPoolFreeSpace(tp *v1alpha1.StatusThinPool) resource.Quantity {
     free := tp.ActualSize
-    used, err := resource.ParseQuantity(tp.UsedSize)
-    if err != nil {
-        return resource.Quantity{}, err
-    }
-    free.Sub(used)
+    free.Sub(tp.UsedSize)
 
-    return free, nil
+    return free
 }
 
 func getPersistentVolumes(ctx context.Context, cl client.Client) (map[string]corev1.PersistentVolume, error) {
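The filter keeps free space as raw bytes (Quantity.Value) in a map keyed by LVMVolumeGroup name, which is what it compares against requested sizes. A stand-alone sketch of that computation, with lvgStatus as a stand-in for the real status type:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

// lvgStatus is a stand-in mirroring the fields the filter functions read.
type lvgStatus struct {
    VGSize        resource.Quantity
    AllocatedSize resource.Quantity
}

func main() {
    statuses := map[string]lvgStatus{
        "vg-data": {VGSize: resource.MustParse("100Gi"), AllocatedSize: resource.MustParse("60Gi")},
        "vg-fast": {VGSize: resource.MustParse("10Gi"), AllocatedSize: resource.MustParse("1Gi")},
    }

    // Mirrors getLVGThickFreeSpaces: free space as raw bytes per VG.
    thickFree := make(map[string]int64, len(statuses))
    for name, st := range statuses {
        free := st.VGSize.DeepCopy()
        free.Sub(st.AllocatedSize)
        thickFree[name] = free.Value()
    }
    fmt.Println(thickFree) // map[vg-data:42949672960 vg-fast:9663676416]
}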
@@ -157,11 +157,7 @@ func scoreNodes(
     lvg := lvgs[commonLVG.Name]
     switch pvcReq.DeviceType {
     case thick:
-        freeSpace, err = getVGFreeSpace(lvg)
-        if err != nil {
-            errs <- err
-            return
-        }
+        freeSpace = getVGFreeSpace(lvg)
         log.Trace(fmt.Sprintf("[scoreNodes] LVMVolumeGroup %s free thick space before PVC reservation: %s", lvg.Name, freeSpace.String()))
         reserved, err := schedulerCache.GetLVGReservedSpace(lvg.Name)
         if err != nil {
@@ -181,20 +177,11 @@
             return
         }
 
-        freeSpace, err = getThinPoolFreeSpace(thinPool)
-        if err != nil {
-            errs <- err
-            return
-        }
+        freeSpace = getThinPoolFreeSpace(thinPool)
     }
 
-    lvgTotalSize, err := resource.ParseQuantity(lvg.Status.VGSize)
-    if err != nil {
-        errs <- err
-        return
-    }
-    log.Trace(fmt.Sprintf("[scoreNodes] LVMVolumeGroup %s total size: %s", lvg.Name, lvgTotalSize.String()))
-    totalFreeSpaceLeft += getFreeSpaceLeftPercent(freeSpace.Value(), pvcReq.RequestedSize, lvgTotalSize.Value())
+    log.Trace(fmt.Sprintf("[scoreNodes] LVMVolumeGroup %s total size: %s", lvg.Name, lvg.Status.VGSize.String()))
+    totalFreeSpaceLeft += getFreeSpaceLeftPercent(freeSpace.Value(), pvcReq.RequestedSize, lvg.Status.VGSize.Value())
     }
 
     averageFreeSpace := totalFreeSpaceLeft / int64(len(pvcs))
