[controller][csi] Fix StorageClass annotation preservation and improve NodePublishVolume mount check (#43)

Signed-off-by: Aleksandr Zimin <alexandr.zimin@flant.com>
AleksZimin authored Jun 24, 2024
1 parent 3b9c68a commit cef61f0
Showing 5 changed files with 73 additions and 28 deletions.
@@ -60,6 +60,9 @@ const (
LocalStorageClassFinalizerName = "storage.deckhouse.io/local-storage-class-controller"
LocalStorageClassFinalizerNameOld = "localstorageclass.storage.deckhouse.io"

StorageClassDefaultAnnotationKey = "storageclass.kubernetes.io/is-default-class"
StorageClassDefaultAnnotationValTrue = "true"

AllowVolumeExpansionDefaultValue = true

FailedStatusPhase = "Failed"
@@ -142,7 +142,7 @@ func reconcileLSCUpdateFunc(

if hasDiff {
log.Info(fmt.Sprintf("[reconcileLSCUpdateFunc] current Storage Class LVMVolumeGroups do not match LocalStorageClass ones. The Storage Class %s will be recreated with new ones", lsc.Name))
newSC, err := configureStorageClass(lsc)
newSC, err := updateStorageClass(lsc, oldSC)
if err != nil {
log.Error(err, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to configure a Storage Class for the LocalStorageClass %s", lsc.Name))
upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, err.Error())
@@ -703,3 +703,16 @@ func removeFinalizerIfExists(ctx context.Context, cl client.Client, obj metav1.O

return removed, nil
}

func updateStorageClass(lsc *v1alpha1.LocalStorageClass, oldSC *v1.StorageClass) (*v1.StorageClass, error) {
newSC, err := configureStorageClass(lsc)
if err != nil {
return nil, err
}

if oldSC.Annotations != nil {
newSC.Annotations = oldSC.Annotations
}

return newSC, nil
}
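
For context, a minimal standalone sketch of the annotation carry-over idea used by the helper above (package layout, variable names, and the main function here are illustrative, not part of the commit): the recreated StorageClass is configured from the LocalStorageClass spec first, and the old object's annotations, including the default-class marker defined in the constants above, are then copied onto it.

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const defaultClassAnnotation = "storageclass.kubernetes.io/is-default-class"

// carryOverAnnotations mirrors the helper above: reuse the old object's
// annotations on the freshly configured StorageClass, if there are any.
func carryOverAnnotations(newSC, oldSC *storagev1.StorageClass) *storagev1.StorageClass {
	if oldSC.Annotations != nil {
		newSC.Annotations = oldSC.Annotations
	}
	return newSC
}

func main() {
	oldSC := &storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{
		Name:        "local-sc",
		Annotations: map[string]string{defaultClassAnnotation: "true"},
	}}
	newSC := &storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "local-sc"}}

	// Prints "true": the recreated class is still marked as the default one.
	fmt.Println(carryOverAnnotations(newSC, oldSC).Annotations[defaultClassAnnotation])
}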
@@ -115,6 +115,23 @@ var _ = Describe(controller.LocalStorageClassCtrlName, func() {
performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThickType, reclaimPolicyDelete, volumeBindingModeWFFC)
})

It("Annotate_sc_as_default_sc", func() {
sc := &v1.StorageClass{}
err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc)
Expect(err).NotTo(HaveOccurred())
Expect(sc.Annotations).To(BeNil())

sc.Annotations = map[string]string{
controller.StorageClassDefaultAnnotationKey: controller.StorageClassDefaultAnnotationValTrue,
}

err = cl.Update(ctx, sc)
Expect(err).NotTo(HaveOccurred())
Expect(sc.Annotations).To(HaveLen(1))
Expect(sc.Annotations).To(HaveKeyWithValue(controller.StorageClassDefaultAnnotationKey, controller.StorageClassDefaultAnnotationValTrue))

})

It("Update_local_sc_add_existing_lvg", func() {
lvgSpec := []v1alpha1.LocalStorageClassLVG{
{Name: existingThickLVG1Name},
@@ -155,6 +172,15 @@ var _ = Describe(controller.LocalStorageClassCtrlName, func() {
performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThickType, reclaimPolicyDelete, volumeBindingModeWFFC)
})

It("Check_anotated_sc_after_lsc_update", func() {
sc := &v1.StorageClass{}
err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc)
Expect(err).NotTo(HaveOccurred())
Expect(sc.Annotations).To(HaveLen(1))
Expect(sc.Annotations).To(HaveKeyWithValue(controller.StorageClassDefaultAnnotationKey, controller.StorageClassDefaultAnnotationValTrue))

})

It("Update_local_sc_remove_existing_lvg", func() {
lvgSpec := []v1alpha1.LocalStorageClassLVG{
{Name: existingThickLVG1Name},
35 changes: 19 additions & 16 deletions images/sds-local-volume-csi/driver/node.go
@@ -207,8 +207,24 @@ func (d *Driver) NodePublishVolume(ctx context.Context, request *csi.NodePublish
mountOptions = append(mountOptions, "ro")
}

vgName, ok := request.GetVolumeContext()[internal.VGNameKey]
if !ok {
return nil, status.Error(codes.InvalidArgument, "[NodePublishVolume] Volume group name cannot be empty")
}

devPath := fmt.Sprintf("/dev/%s/%s", vgName, request.VolumeId)
d.log.Debug(fmt.Sprintf("[NodePublishVolume] Checking if device exists: %s", devPath))
exists, err := d.storeManager.PathExists(devPath)
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodePublishVolume] Error checking if device exists: %v", err)
}
if !exists {
return nil, status.Errorf(codes.NotFound, "[NodePublishVolume] Device %q not found", devPath)
}

d.log.Debug(fmt.Sprintf("[NodePublishVolume] Volume %s operation started", volumeID))
ok := d.inFlight.Insert(volumeID)

ok = d.inFlight.Insert(volumeID)
if !ok {
return nil, status.Errorf(codes.Aborted, VolumeOperationAlreadyExists, volumeID)
}
@@ -220,21 +236,8 @@ func (d *Driver) NodePublishVolume(ctx context.Context, request *csi.NodePublish
switch volCap.GetAccessType().(type) {
case *csi.VolumeCapability_Block:
d.log.Trace("[NodePublishVolume] Block volume detected.")
vgName, ok := request.GetVolumeContext()[internal.VGNameKey]
if !ok {
return nil, status.Error(codes.InvalidArgument, "[NodeStageVolume] Volume group name cannot be empty")
}

devPath := fmt.Sprintf("/dev/%s/%s", vgName, request.VolumeId)
d.log.Debug(fmt.Sprintf("[NodePublishVolume] Checking if device exists: %s", devPath))
exists, err := d.storeManager.PathExists(devPath)
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodePublishVolume] Error checking if device exists: %v", err)
}
if !exists {
return nil, status.Errorf(codes.NotFound, "[NodePublishVolume] Device %q not found", devPath)
}
err = d.storeManager.NodePublishVolumeBlock(devPath, target, mountOptions)
err := d.storeManager.NodePublishVolumeBlock(devPath, target, mountOptions)
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodePublishVolume] Error mounting volume %q at %q: %v", devPath, target, err)
}
@@ -257,7 +260,7 @@ func (d *Driver) NodePublishVolume(ctx context.Context, request *csi.NodePublish

mountOptions = collectMountOptions(fsType, mountVolume.GetMountFlags(), mountOptions)

err := d.storeManager.NodePublishVolumeFS(source, target, fsType, mountOptions)
err := d.storeManager.NodePublishVolumeFS(source, devPath, target, fsType, mountOptions)
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodePublishVolume] Error bind mounting volume %q. Source: %q. Target: %q. Mount options:%v. Err: %v", volumeID, source, target, mountOptions, err)
}
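As a rough illustration of the relocated check in node.go (a standalone sketch, not the driver code: os.Stat stands in for the driver's storeManager.PathExists, and all names below are made up), the LV device path is derived from the volume-context VG name and the volume ID, and a missing device is reported before any mount work starts. One plausible reason for doing this before d.inFlight.Insert is that early rejections then never leave an in-flight entry to clean up.

package main

import (
	"errors"
	"fmt"
	"os"
)

// devicePathFor builds the LVM device path the way the handler above does:
// /dev/<vg-name>/<volume-id>.
func devicePathFor(vgName, volumeID string) string {
	return fmt.Sprintf("/dev/%s/%s", vgName, volumeID)
}

// ensureDeviceExists fails fast when the logical volume is not present on the
// node, so neither the block nor the filesystem publish path attempts a mount.
func ensureDeviceExists(vgName, volumeID string) (string, error) {
	if vgName == "" {
		return "", errors.New("volume group name cannot be empty")
	}
	devPath := devicePathFor(vgName, volumeID)
	if _, err := os.Stat(devPath); err != nil {
		if os.IsNotExist(err) {
			return "", fmt.Errorf("device %q not found", devPath)
		}
		return "", fmt.Errorf("error checking if device exists: %w", err)
	}
	return devPath, nil
}

func main() {
	if devPath, err := ensureDeviceExists("vg-data", "pvc-1234"); err != nil {
		fmt.Println("publish rejected:", err)
	} else {
		fmt.Println("device ready for publish:", devPath)
	}
}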
22 changes: 11 additions & 11 deletions images/sds-local-volume-csi/pkg/utils/node-store-manager.go
@@ -31,7 +31,7 @@ import (
type NodeStoreManager interface {
NodeStageVolumeFS(source, target string, fsType string, mountOpts []string, lvmType, lvmThinPoolName string) error
NodePublishVolumeBlock(source, target string, mountOpts []string) error
NodePublishVolumeFS(source, target, fsType string, mountOpts []string) error
NodePublishVolumeFS(source, devPath, target, fsType string, mountOpts []string) error
Unstage(target string) error
Unpublish(target string) error
IsNotMountPoint(target string) (bool, error)
@@ -173,7 +173,7 @@ func (s *Store) NodePublishVolumeBlock(source, target string, mountOpts []string
return nil
}

func (s *Store) NodePublishVolumeFS(source, target, fsType string, mountOpts []string) error {
func (s *Store) NodePublishVolumeFS(source, devPath, target, fsType string, mountOpts []string) error {
s.Log.Info(" ----== Start NodePublishVolumeFS ==---- ")
s.Log.Trace(fmt.Sprintf("[NodePublishVolumeFS] params source=%q target=%q mountOptions=%v", source, target, mountOpts))
isMountPoint := false
@@ -197,11 +197,11 @@ func (s *Store) NodePublishVolumeFS(source, target, fsType string, mountOpts []s

if isMountPoint {
s.Log.Trace(fmt.Sprintf("[NodePublishVolumeFS] target directory %q is a mount point. Check mount", target))
err := checkMount(s, source, target, mountOpts)
err := checkMount(s, devPath, target, mountOpts)
if err != nil {
return fmt.Errorf("[NodePublishVolumeFS] failed to check mount info for %q: %w", target, err)
}
s.Log.Trace(fmt.Sprintf("[NodePublishVolumeFS] target directory %q is a mount point and already mounted to source %q", target, source))
s.Log.Trace(fmt.Sprintf("[NodePublishVolumeFS] target directory %q is a mount point and already mounted to source %s. Skipping mount", target, source))
return nil
}

@@ -283,27 +283,27 @@ func toMapperPath(devPath string) string {
return "/dev/mapper/" + mapperPath
}

func checkMount(s *Store, source, target string, mountOpts []string) error {
func checkMount(s *Store, devPath, target string, mountOpts []string) error {
mntInfo, err := s.NodeStorage.Interface.List()
if err != nil {
return fmt.Errorf("[checkMount] failed to list mounts: %w", err)
}

for _, m := range mntInfo {
if m.Path == target {
if m.Device != source {
return fmt.Errorf("[checkMount] device from mount point %q does not match expected source %q", m.Device, source)
mapperDevicePath := toMapperPath(devPath)
if m.Device != devPath && m.Device != mapperDevicePath {
return fmt.Errorf("[checkMount] device from mount point %q does not match expected source device path %s or mapper device path %s", m.Device, devPath, mapperDevicePath)
}
s.Log.Trace(fmt.Sprintf("[checkMount] mount point %s is mounted to device %s", target, m.Device))

if slices.Contains(mountOpts, "ro") {
if !slices.Contains(m.Opts, "ro") {
return fmt.Errorf("[checkMount] passed mount options contain 'ro' but mount options from mount point %q do not", target)
}

if slices.Equal(m.Opts, mountOpts) {
return fmt.Errorf("mount options %v do not match expected mount options %v", m.Opts, mountOpts)
}
s.Log.Trace(fmt.Sprintf("[checkMount] mount point %s is mounted read-only", target))
}
s.Log.Trace(fmt.Sprintf("[checkMount] mount point %s is mounted to device %s with mount options %v", target, m.Device, m.Opts))

return nil
}
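The mount table may report the backing device either as /dev/<vg>/<lv> or as its device-mapper alias, which is why checkMount now compares against both forms. Below is a small standalone sketch of that comparison; the mapper alias is built with the usual LVM convention (hyphens inside VG/LV names are doubled), which is an assumption about the environment here and not necessarily identical to the repository's toMapperPath implementation.

package main

import (
	"fmt"
	"strings"
)

// mapperAlias converts /dev/<vg>/<lv> to the conventional device-mapper name
// /dev/mapper/<vg>-<lv>, doubling any hyphens inside the VG or LV name.
func mapperAlias(devPath string) string {
	parts := strings.Split(strings.TrimPrefix(devPath, "/dev/"), "/")
	if len(parts) != 2 {
		return ""
	}
	vg := strings.ReplaceAll(parts[0], "-", "--")
	lv := strings.ReplaceAll(parts[1], "-", "--")
	return "/dev/mapper/" + vg + "-" + lv
}

// deviceMatches reports whether a device seen in the mount table refers to the
// expected logical volume under either of its two common path forms.
func deviceMatches(mounted, devPath string) bool {
	return mounted == devPath || mounted == mapperAlias(devPath)
}

func main() {
	devPath := "/dev/data-vg/pvc-1234"
	fmt.Println(deviceMatches("/dev/data-vg/pvc-1234", devPath))          // true
	fmt.Println(deviceMatches("/dev/mapper/data--vg-pvc--1234", devPath)) // true
	fmt.Println(deviceMatches("/dev/sdb1", devPath))                      // false
}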
