From 53ea17dbdb709f8db5abe6f00c892350c815161c Mon Sep 17 00:00:00 2001 From: Aleksandr Zimin Date: Thu, 23 May 2024 17:33:57 +0300 Subject: [PATCH] fix after review Signed-off-by: Aleksandr Zimin --- ...e.sh => check_previous_channel_release.sh} | 0 .github/workflows/deploy_prod.yml | 21 +- crds/doc-ru-localstorageclass.yaml | 56 ++ docs/FAQ.md | 242 +++++- docs/FAQ.ru.md | 237 ++++- docs/README.md | 271 +++--- docs/README.ru.md | 242 +++--- images/sds-local-volume-controller/Dockerfile | 4 +- .../sds-local-volume-controller/cmd/main.go | 13 +- images/sds-local-volume-controller/go.mod | 43 +- images/sds-local-volume-controller/go.sum | 92 +- .../pkg/controller/controller_suite_test.go | 66 ++ .../pkg/controller/local_csi_node_watcher.go | 11 +- .../controller/local_storage_class_watcher.go | 710 +-------------- .../local_storage_class_watcher_func.go | 705 +++++++++++++++ .../local_storage_class_watcher_test.go | 814 ++++++++++++++++++ .../pkg/monitoring/monitoring.go | 150 ---- openapi/doc-ru-config-values.yaml | 10 + 18 files changed, 2487 insertions(+), 1200 deletions(-) rename .github/{checkPreviousChannelRelease.sh => check_previous_channel_release.sh} (100%) create mode 100644 crds/doc-ru-localstorageclass.yaml create mode 100644 images/sds-local-volume-controller/pkg/controller/controller_suite_test.go create mode 100644 images/sds-local-volume-controller/pkg/controller/local_storage_class_watcher_func.go create mode 100644 images/sds-local-volume-controller/pkg/controller/local_storage_class_watcher_test.go delete mode 100644 images/sds-local-volume-controller/pkg/monitoring/monitoring.go diff --git a/.github/checkPreviousChannelRelease.sh b/.github/check_previous_channel_release.sh similarity index 100% rename from .github/checkPreviousChannelRelease.sh rename to .github/check_previous_channel_release.sh diff --git a/.github/workflows/deploy_prod.yml b/.github/workflows/deploy_prod.yml index 061c4da5..6502f824 100644 --- a/.github/workflows/deploy_prod.yml +++ b/.github/workflows/deploy_prod.yml @@ -66,11 +66,12 @@ jobs: - name: ECHO VAR run: | echo $MODULES_MODULE_SOURCE - - name: Check previous release - run: | - .github/checkPreviousChannelRelease.sh $MODULE_SOURCE_NAME ce $RELEASE_CHANNEL $MODULES_MODULE_TAG - uses: actions/checkout@v4 - uses: deckhouse/modules-actions/setup@v1 + - name: Check previous release + run: | + chmod +x .github/check_previous_channel_release.sh + .github/check_previous_channel_release.sh $MODULES_MODULE_NAME ce $RELEASE_CHANNEL $MODULES_MODULE_TAG - uses: deckhouse/modules-actions/deploy@v1 job-EE: @@ -85,11 +86,12 @@ jobs: - name: ECHO VAR run: | echo $MODULES_MODULE_SOURCE - - name: Check previous release - run: | - .github/checkPreviousChannelRelease.sh $MODULE_SOURCE_NAME ee $RELEASE_CHANNEL $MODULES_MODULE_TAG - uses: actions/checkout@v4 - uses: deckhouse/modules-actions/setup@v1 + - name: Check previous release + run: | + chmod +x .github/check_previous_channel_release.sh + .github/check_previous_channel_release.sh $MODULES_MODULE_NAME ee $RELEASE_CHANNEL $MODULES_MODULE_TAG - uses: deckhouse/modules-actions/deploy@v1 job-FE: @@ -104,9 +106,10 @@ jobs: - name: ECHO VAR run: | echo $MODULES_MODULE_SOURCE - - name: Check previous release - run: | - .github/checkPreviousChannelRelease.sh $MODULE_SOURCE_NAME fe $RELEASE_CHANNEL $MODULES_MODULE_TAG - uses: actions/checkout@v4 - uses: deckhouse/modules-actions/setup@v1 + - name: Check previous release + run: | + chmod +x .github/check_previous_channel_release.sh + 
.github/check_previous_channel_release.sh $MODULES_MODULE_NAME fe $RELEASE_CHANNEL $MODULES_MODULE_TAG - uses: deckhouse/modules-actions/deploy@v1 diff --git a/crds/doc-ru-localstorageclass.yaml b/crds/doc-ru-localstorageclass.yaml new file mode 100644 index 00000000..37a1ff24 --- /dev/null +++ b/crds/doc-ru-localstorageclass.yaml @@ -0,0 +1,56 @@ +spec: + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: | + LocalStorageClass - это пользовательский ресурс Kubernetes, который определяет конфигурацию для Kubernetes Storage Class. + properties: + spec: + description: | + Описывает конфигурацию Kubernetes Storage Class. + properties: + reclaimPolicy: + description: | + Reclaim policy данного storage class'а. Может быть: + - Delete (При удалении Persistent Volume Claim также удаляются Persistent Volume и связанное хранилище) + - Retain (При удалении Persistent Volume Claim остаются Persistent Volume и связанное хранилище) + volumeBindingMode: + description: | + Binding mode для данного Storage class'а. Может быть: + - Immediate (создает PV сразу же, как будет создан PVC) + - WaitForFirstConsumer (создает PV только после того, как будет создан Pod для PVC) + lvm: + description: | + Поле описывает конфигурацию LVM. + properties: + type: + description: | + Тип девайса. + lvmVolumeGroups: + description: | + LVMVolumeGroup ресурсы, на которых будут размещены Persistent Volume. + items: + properties: + name: + description: | + Имя LVMVolumeGroup ресурса. + thin: + description: | + Thin pool в выбранном LVMVolumeGroup ресурсе. + properties: + poolName: + description: | + Имя выбранного Thin pool. + status: + description: | + Описывает текущую информацию о соответствующем Storage Class. + properties: + phase: + description: | + Текущее состояние Storage class. Может быть: + - Failed (в случае, если контроллер получил некорректную конфигурацию, или возникли ошибки в ходе выполнения операций) + - Create (если все операции завершились успешно) + reason: + description: | + Дополнительная информация о состоянии Storage Class. diff --git a/docs/FAQ.md b/docs/FAQ.md index 0ab8036c..886550ee 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -16,4 +16,244 @@ In case of no free space in the pool, degradation in the module's operation as a ## How do I set the default StorageClass? -Set the `spec.IsDefault` field to `true` in the corresponding [LocalStorageClass](./cr.html#localstorageclass) custom resource. +Add the annotation `storageclass.kubernetes.io/is-default-class: "true"` to the corresponding StorageClass resource: + +```shell +kubectl annotate storageclasses.storage.k8s.io storageclass.kubernetes.io/is-default-class=true +``` + +## I don't want the module to be used on all nodes of the cluster. How can I select the desired nodes? + +The nodes that will be involved with the module are determined by special labels specified in the `nodeSelector` field in the module settings. 
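+
+For example, a minimal sketch of setting this selector non-interactively with `kubectl patch` (the label key and value below are the placeholder ones used throughout this FAQ):
+
+```shell
+kubectl patch mc sds-local-volume --type=merge -p '{"spec":{"settings":{"dataNodes":{"nodeSelector":{"my-custom-label-key":"my-custom-label-value"}}}}}'
+```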
+ +To display and edit the module settings, you can execute the command: + +```shell +kubectl edit mc sds-local-volume +``` + +The approximate output of the command would be: + +```yaml +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-local-volume +spec: + enabled: true + settings: + dataNodes: + nodeSelector: + my-custom-label-key: my-custom-label-value +status: + message: "" + version: "1" +``` + +To display existing labels specified in the `nodeSelector` field, you can execute the command: + +```shell +kubectl get mc sds-local-volume -o=jsonpath={.spec.settings.dataNodes.nodeSelector} +``` + +The approximate output of the command would be: + +```yaml +nodeSelector: + my-custom-label-key: my-custom-label-value +``` + +Nodes whose labels include the set specified in the settings are selected by the module as targets for usage. Therefore, by changing the `nodeSelector` field, you can influence the list of nodes that the module will use. + +> Please note that the `nodeSelector` field can contain any number of labels, but it's crucial that each of the specified labels is present on the node you intend to use for working with the module. It's only when all the specified labels are present on the selected node that the `sds-local-volume-csi-node` pod will be launched. + +After adding labels to the nodes, the `sds-local-volume-csi-node` pods should be started. You can check their presence using the command: + +```shell + kubectl -n d8-sds-local-volume get pod -owide + ``` + +## Why can't I create a PVC on the selected node using the module? + +Please verify that the pod `sds-local-volume-csi-node` is running on the selected node. + +```shell +kubectl -n d8-sds-local-volume get po -owide +``` + +If the pod is missing, please ensure that all labels specified in the module settings in the `nodeSelector` field are present on the selected node. More details about this can be found [here](#service-pods-for-the-sds-local-volume-components-are-not-being-created-on-the-node-i-need-why-is-that). + +## How do I take a node out of the module's control? +To take a node out of the module's control, you need to remove the labels specified in the `nodeSelector` field in the module settings for `sds-local-volume`. + +You can check the presence of existing labels in the `nodeSelector` using the command: + +```shell +kubectl get mc sds-local-volume -o=jsonpath={.spec.settings.dataNodes.nodeSelector} +``` + +The approximate output of the command would be: + +```yaml +nodeSelector: + my-custom-label-key: my-custom-label-value +``` + +Remove the labels specified in `nodeSelector` from the desired nodes. + +```shell +kubectl label node %node-name% %label-from-selector%- +``` +> Please note that to remove a label, you need to add a hyphen immediately after its key instead of its value. + +As a result, the `sds-local-volume-csi-node` pod should be deleted from the desired node. You can check its status using the command: + +```shell +kubectl -n d8-sds-local-volume get po -owide +``` + +If the `sds-local-volume-csi-node` pod remains on the node after removing the `nodeSelector` label, please ensure that the labels specified in the `nodeSelector` field of the `d8-sds-local-volume-controller-config` in the config have indeed been successfully removed from the selected node. 
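+
+To see the selector the controller is actually applying, a quick check of that config (the same command appears in the section about service pods below):
+
+```shell
+kubectl -n d8-sds-local-volume get secret d8-sds-local-volume-controller-config -o jsonpath='{.data.config}' | base64 --decode
+```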
+ +You can verify this using the command: + +```shell +kubectl get node %node-name% --show-labels +``` + +If the labels from `nodeSelector` are not present on the node, ensure that this node does not own any `LVMVolumeGroup` resources used by `LocalStorageClass` resources. More details about this check can be found [here](#how-to-check-if-there-are-dependent-resources-lvmvolumegroup-on-the-node). + + +> Please note that on the `LVMVolumeGroup` and `LocalStorageClass` resources, which prevent the node from being taken out of the module's control, the label `storage.deckhouse.io/sds-local-volume-candidate-for-eviction` will be displayed. +On the node itself, the label `storage.deckhouse.io/sds-local-volume-need-manual-eviction` will be present. + + +## How to check if there are dependent resources `LVMVolumeGroup` on the node? +To check for such resources, follow these steps: +1. Display the existing `LocalStorageClass` resources + +```shell +kubectl get lsc +``` + +2. Check each of them for the list of used `LVMVolumeGroup` resources. + +> If you want to list all `LocalStorageClass` resources at once, run the command: +> +> ```shell +> kubectl get lsc -oyaml +> ``` + +```shell +kubectl get lsc %lsc-name% -oyaml +``` + +An approximate representation of `LocalStorageClass` could be: + +```yaml +apiVersion: v1 +items: +- apiVersion: storage.deckhouse.io/v1alpha1 + kind: LocalStorageClass + metadata: + finalizers: + - localstorageclass.storage.deckhouse.io + name: test-sc + spec: + lvm: + lvmVolumeGroups: + - name: test-vg + type: Thick + reclaimPolicy: Delete + volumeBindingMode: WaitForFirstConsumer + status: + phase: Created +kind: List +``` + +> Please pay attention to the `spec.lvm.lvmVolumeGroups` field - it specifies the used `LVMVolumeGroup` resources. + +3. Display the list of existing `LVMVolumeGroup` resources. + +```shell +kubectl get lvg +``` + +An approximate representation of `LVMVolumeGroup` could be: + +```text +NAME HEALTH NODE SIZE ALLOCATED SIZE VG AGE +lvg-on-worker-0 Operational node-worker-0 40956Mi 0 test-vg 15d +lvg-on-worker-1 Operational node-worker-1 61436Mi 0 test-vg 15d +lvg-on-worker-2 Operational node-worker-2 122876Mi 0 test-vg 15d +lvg-on-worker-3 Operational node-worker-3 307196Mi 0 test-vg 15d +lvg-on-worker-4 Operational node-worker-4 307196Mi 0 test-vg 15d +lvg-on-worker-5 Operational node-worker-5 204796Mi 0 test-vg 15d +``` + +4. Ensure that the node you intend to remove from the module's control does not have any `LVMVolumeGroup` resources used in `LocalStorageClass` resources. + +> To avoid unintentionally losing control over volumes already created using the module, the user needs to manually delete dependent resources by performing necessary operations on the volume. + +## I removed the labels from the node, but the `sds-local-volume-csi-node` pod is still there. Why did this happen? +Most likely, there are `LVMVolumeGroup` resources present on the node, which are used in one of the `LocalStorageClass` resources. + +To avoid unintentionally losing control over volumes already created using the module, the user needs to manually delete dependent resources by performing necessary operations on the volume." + +The process of checking for the presence of the aforementioned resources is described [here](#how-to-check-if-there-are-dependent-resources-lvmvolumegroup-on-the-node). + + +## Service pods for the `sds-local-volume` components are not being created on the node I need. Why is that? 
+ +With a high probability, the issues are related to the labels on the node. + +Nodes to be used by the module are determined by special labels specified in the `nodeSelector` field in the module settings. + +To display the existing labels specified in the `nodeSelector` field, you can execute the command: + +```shell +kubectl get mc sds-local-volume -o=jsonpath={.spec.settings.dataNodes.nodeSelector} +``` + +The approximate output of the command would be: + +```yaml +nodeSelector: + my-custom-label-key: my-custom-label-value +``` + +Nodes whose labels include the set specified in the settings are chosen by the module as targets for usage. + +You can also additionally check the selectors used by the module in the configuration of the secret `d8-sds-local-volume-controller-config` in the namespace `d8-sds-local-volume`. + +```shell +kubectl -n d8-sds-local-volume get secret d8-sds-local-volume-controller-config -o jsonpath='{.data.config}' | base64 --decode +``` + +The approximate output of the command would be: + +```yaml +nodeSelector: + kubernetes.io/os: linux + my-custom-label-key: my-custom-label-value +``` + +> The output of this command should include all labels from the settings of the `data.nodeSelector` module, as well as `kubernetes.io/os: linux`. + +Check the labels on the node you need: + +```shell +kubectl get node %node-name% --show-labels +``` + +If necessary, add the missing labels to the desired node: + +```shell +kubectl label node %node-name% my-custom-label-key=my-custom-label-value +``` + +If the labels are present, it's necessary to check for the existence of the label `storage.deckhouse.io/sds-local-volume-node=` on the node. If the label is absent, it's advisable to verify whether `sds-local-volume-controller` is functioning properly. If it is, then check the logs: + +```shell +kubectl -n d8-sds-local-volume get po -l app=sds-local-volume-controller +kubectl -n d8-sds-local-volume logs -l app=sds-local-volume-controller +``` diff --git a/docs/FAQ.ru.md b/docs/FAQ.ru.md index 0414b268..400aa175 100644 --- a/docs/FAQ.ru.md +++ b/docs/FAQ.ru.md @@ -16,4 +16,239 @@ Overprovisioning в LVMThin нужно использовать с осторо ## Как назначить StorageClass по умолчанию? -В соответствующем пользовательском ресурсе [LocalStorageClass](./cr.html#localstorageclass) в поле `spec.isDefault` указать `true`. +Добавьте аннотацию `storageclass.kubernetes.io/is-default-class: "true"` в соответствующий ресурс StorageClass: + +```shell +kubectl annotate storageclasses.storage.k8s.io storageclass.kubernetes.io/is-default-class=true +``` + +## Я не хочу, чтобы модуль использовался на всех узлах кластера. Как мне выбрать желаемые узлы? +Узлы, которые будут задействованы модулем, определяются специальными метками, указанными в поле `nodeSelector` в настройках модуля. 
+ +Для отображения и редактирования настроек модуля, можно выполнить команду: + +```shell +kubectl edit mc sds-local-volume +``` + +Примерный вывод команды: + +```yaml +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-local-volume +spec: + enabled: true + settings: + dataNodes: + nodeSelector: + my-custom-label-key: my-custom-label-value +status: + message: "" + version: "1" +``` + +Для отображения существующих меток, указанных в поле `nodeSelector`, можно выполнить команду: + +```shell +kubectl get mc sds-local-volume -o=jsonpath={.spec.settings.dataNodes.nodeSelector} +``` +Примерный вывод команды: + +```yaml +nodeSelector: + my-custom-label-key: my-custom-label-value +``` + +Узлы, метки которых включают в себя набор, указанный в настройках, выбираются модулем как целевые для использования. Соответственно, изменяя поле `nodeSelector` Вы можете влиять на список узлов, которые будут использованы модулем. + +> Обратите внимание, что в поле `nodeSelector` может быть указано любое количество меток, но важно, чтобы каждая из указанных меток присутствовала на узле, который Вы собираетесь использовать для работы с модулем. Именно при наличии всех указанных меток на выбранном узле, произойдет запуск pod-а `sds-local-volume-csi-node`. + +После добавление меток на узлах должны быть запущены pod-ы `sds-local-volume-csi-node`. Проверить их наличие можно командой: + +```shell + kubectl -n d8-sds-local-volume get pod -owide + ``` + +## Почему не удается создать PVC на выбранном узле с помощью модуля? + +Пожалуйста, проверьте, что на выбранном узле работает pod `sds-local-volume-csi-node`. + +```shell +kubectl -n d8-sds-local-volume get po -owide +``` + +Если pod отсутствует, пожалуйста, убедитесь, что на выбранном узле присутствуют все метки, указанные в настройках модуля в поле `nodeSelector`. Подробнее об этом [здесь](#служебные-pod-ы-компонентов-sds-local-volume-не-создаются-на-нужном-мне-узле-почему). + +## Я хочу вывести узел из-под управления модуля, что делать? +Для вывода узла из-под управления модуля необходимо убрать метки, указанные в поле `nodeSelector` в настройках модуля `sds-local-volume`. + +Проверить наличие существующих меток в `nodeSelector` можно командой: + +```shell +kubectl get mc sds-local-volume -o=jsonpath={.spec.settings.dataNodes.nodeSelector} +``` + +Примерный вывод команды: + +```yaml +nodeSelector: + my-custom-label-key: my-custom-label-value +``` + +Снимите указанные в `nodeSelector` метки с желаемых узлов. + +```shell +kubectl label node %node-name% %label-from-selector%- +``` +> Обратите внимание, что для снятия метки необходимо после его ключа вместо значения сразу же поставить знак минуса. + +В результате pod `sds-local-volume-csi-node` должен быть удален с желаемого узла. Для проверки состояния можно выполнить команду: + +```shell +kubectl -n d8-sds-local-volume get po -owide +``` + +Если pod `sds-local-volume-csi-node` после удаления метки `nodeSelector` все же остался на узле, пожалуйста, убедитесь, что указанные в конфиге `d8-sds-local-volume-controller-config` в `nodeSelector` метки действительно успешно снялись с выбранного узла. +Проверить это можно командой: + +```shell +kubectl get node %node-name% --show-labels +``` + +Если метки из `nodeSelector` не присутствуют на узле, то убедитесь, что данному узлу не принадлежат `LVMVolumeGroup` ресурсы, использующиеся `LocalStorageClass` ресурсами. Подробнее об этой проверке можно прочитать [здесь](#как-проверить-имеются-ли-зависимые-ресурсы-lvmvolumegroup-на-узле). 
+ + +> Обратите внимание, что на ресурсах `LVMVolumeGroup` и `LocalStorageClass`, из-за которых не удается вывести узел из-под управления модуля будет отображена метка `storage.deckhouse.io/sds-local-volume-candidate-for-eviction`. +> +> На самом узле будет присутствовать метка `storage.deckhouse.io/sds-local-volume-need-manual-eviction`. + +## Как проверить, имеются ли зависимые ресурсы `LVMVolumeGroup` на узле? +Для проверки таковых ресурсов необходимо выполнить следующие шаги: +1. Отобразить имеющиеся `LocalStorageClass` ресурсы + +```shell +kubectl get lsc +``` + +2. Проверить у каждого из них список используемых `LVMVolumeGroup` ресурсов + +> Вы можете сразу отобразить содержимое всех `LocalStorageClass` ресурсов, выполнив команду: +> +> ```shell +> kubectl get lsc -oyaml +> ``` + +```shell +kubectl get lsc %lsc-name% -oyaml +``` + +Примерный вид `LocalStorageClass` + +```yaml +apiVersion: v1 +items: +- apiVersion: storage.deckhouse.io/v1alpha1 + kind: LocalStorageClass + metadata: + finalizers: + - localstorageclass.storage.deckhouse.io + name: test-sc + spec: + lvm: + lvmVolumeGroups: + - name: test-vg + type: Thick + reclaimPolicy: Delete + volumeBindingMode: WaitForFirstConsumer + status: + phase: Created +kind: List +``` + +> Обратите внимание на поле spec.lvm.lvmVolumeGroups - именно в нем указаны используемые `LVMVolumeGroup` ресурсы. + +3. Отобразите список существующих `LVMVolumeGroup` ресурсов + +```shell +kubectl get lvg +``` + +Примерный вывод `LVMVolumeGroup` ресурсов: + +```text +NAME HEALTH NODE SIZE ALLOCATED SIZE VG AGE +lvg-on-worker-0 Operational node-worker-0 40956Mi 0 test-vg 15d +lvg-on-worker-1 Operational node-worker-1 61436Mi 0 test-vg 15d +lvg-on-worker-2 Operational node-worker-2 122876Mi 0 test-vg 15d +lvg-on-worker-3 Operational node-worker-3 307196Mi 0 test-vg 15d +lvg-on-worker-4 Operational node-worker-4 307196Mi 0 test-vg 15d +lvg-on-worker-5 Operational node-worker-5 204796Mi 0 test-vg 15d +``` + +4. Проверьте, что на узле, который вы собираетесь вывести из-под управления модуля, не присутствует какой-либо `LVMVolumeGroup` ресурс, используемый в `LocalStorageClass` ресурсах. + +> Во избежание непредвиденной потери контроля за уже созданными с помощью модуля томами пользователю необходимо вручную удалить зависимые ресурсы, совершив необходимые операции над томом. + +## Я убрал метки с узла, но pod `sds-local-volume-csi-node` остался. Почему так произошло? +Вероятнее всего, на узле присутствуют `LVMVolumeGroup` ресурсы, которые используются в одном из `LocalStorageClass` ресурсов. + +Во избежание непредвиденной потери контроля за уже созданными с помощью модуля томами пользователю необходимо вручную удалить зависимые ресурсы, совершив необходимые операции над томом. + +Процесс проверки на наличие вышеуказанных ресурсов описан [здесь](#как-проверить-имеются-ли-зависимые-ресурсы-lvmvolumegroup-на-узле). + +## Служебные pod-ы компонентов `sds-local-volume` не создаются на нужном мне узле. Почему? +С высокой вероятностью проблемы связаны с метками на узле. + +Узлы, которые будут задействованы модулем, определяются специальными метками, указанными в поле `nodeSelector` в настройках модуля. 
+ +Для отображения существующих меток, указанных в поле `nodeSelector`, можно выполнить команду: + +```shell +kubectl get mc sds-local-volume -o=jsonpath={.spec.settings.dataNodes.nodeSelector} +``` + +Примерный вывод команды: + +```yaml +nodeSelector: + my-custom-label-key: my-custom-label-value +``` + +Узлы, метки которых включают в себя набор, указанный в настройках, выбираются модулем как целевые для использования. + +Также Вы можете дополнительно проверить селекторы, которые используются модулем в конфиге секрета `d8-sds-local-volume-controller-config` в пространстве имен `d8-sds-local-volume`. + +```shell +kubectl -n d8-sds-local-volume get secret d8-sds-local-volume-controller-config -o jsonpath='{.data.config}' | base64 --decode +``` + +Примерный вывод команды: + +```yaml +nodeSelector: + kubernetes.io/os: linux + my-custom-label-key: my-custom-label-value +``` + +> В выводе данной команды должны быть указаны все метки из настроек модуля `data.nodeSelector`, а также `kubernetes.io/os: linux`. + +Проверьте метки на нужном вам узле: + +```shell +kubectl get node %node-name% --show-labels +``` + +При необходимости добавьте недостающие метки на желаемый узел: + +```shell +kubectl label node %node-name% my-custom-label-key=my-custom-label-value +``` + +Если метки присутствуют, необходимо проверить наличие метки `storage.deckhouse.io/sds-local-volume-node=` на узле. Если метка отсутствует, следует проверить работает ли `sds-local-volume-controller`, и в случае его работоспособности, проверить логи: + +```shell +kubectl -n d8-sds-local-volume get po -l app=sds-local-volume-controller +kubectl -n d8-sds-local-volume logs -l app=sds-local-volume-controller +``` diff --git a/docs/README.md b/docs/README.md index 80dd99dc..8e9a8dcf 100644 --- a/docs/README.md +++ b/docs/README.md @@ -23,194 +23,195 @@ Note that all commands must be run on a machine that has administrator access to - Enable the sds-node-configurator module - ```yaml - kubectl apply -f - < The location of the pod data is determined by special labels (nodeSelector) specified in the `spec.settings.dataNodes.nodeSelector` field in the module settings. For more detailed information on the configuration, please follow the [link](./faq.html#i-dont-want-the-module-to-be-used-on-all-nodes-of-the-cluster-how-can-i-select-the-desired-nodes). ### Configuring storage on nodes -You need to create `LVM` volume groups on the nodes using `LVMVolumeGroup` custom resources. As part of this quickstart guide, we will create a regular `Thin` storage. +You need to create `LVM` volume groups on the nodes using `LVMVolumeGroup` custom resources. As part of this quickstart guide, we will create a regular `Thick` storage. + +> Please ensure that the `sds-local-volume-csi-node` pod is running on the node before creating the `LVMVolumeGroup`. 
You can do this using the command: + +> ```shell +> kubectl -n d8-sds-local-volume get pod -owide +> ``` To configure the storage: - List all the [BlockDevice](../../sds-node-configurator/stable/cr.html#blockdevice) resources available in your cluster: - ```shell - kubectl get bd - - NAME NODE CONSUMABLE SIZE PATH - dev-ef4fb06b63d2c05fb6ee83008b55e486aa1161aa worker-0 false 976762584Ki /dev/nvme1n1 - dev-0cfc0d07f353598e329d34f3821bed992c1ffbcd worker-0 false 894006140416 /dev/nvme0n1p6 - dev-7e4df1ddf2a1b05a79f9481cdf56d29891a9f9d0 worker-1 false 976762584Ki /dev/nvme1n1 - dev-b103062f879a2349a9c5f054e0366594568de68d worker-1 false 894006140416 /dev/nvme0n1p6 - dev-53d904f18b912187ac82de29af06a34d9ae23199 worker-2 false 976762584Ki /dev/nvme1n1 - dev-6c5abbd549100834c6b1668c8f89fb97872ee2b1 worker-2 false 894006140416 /dev/nvme0n1p6 - ``` +```shell +kubectl get bd + +NAME NODE CONSUMABLE SIZE PATH +dev-ef4fb06b63d2c05fb6ee83008b55e486aa1161aa worker-0 false 976762584Ki /dev/nvme1n1 +dev-0cfc0d07f353598e329d34f3821bed992c1ffbcd worker-0 false 894006140416 /dev/nvme0n1p6 +dev-7e4df1ddf2a1b05a79f9481cdf56d29891a9f9d0 worker-1 false 976762584Ki /dev/nvme1n1 +dev-b103062f879a2349a9c5f054e0366594568de68d worker-1 false 894006140416 /dev/nvme0n1p6 +dev-53d904f18b912187ac82de29af06a34d9ae23199 worker-2 false 976762584Ki /dev/nvme1n1 +dev-6c5abbd549100834c6b1668c8f89fb97872ee2b1 worker-2 false 894006140416 /dev/nvme0n1p6 +``` - Create an [LVMVolumeGroup](../../sds-node-configurator/stable/cr.html#lvmvolumegroup) resource for `worker-0`: - ```yaml - kubectl apply -f - < Расположение данных pod-ов определяется специальными метками (nodeSelector), которые указываются в поле `spec.settings.dataNodes.nodeSelector` в настройках модуля. Для получения более подробной информации о настройке, пожалуйста, перейдите по [ссылке](./faq.html#я-не-хочу-чтобы-модуль-использовался-на-всех-узлах-кластера-как-мне-выбрать-желаемые-узлы) ### Настройка хранилища на узлах +Необходимо на этих узлах создать группы томов `LVM` с помощью пользовательских ресурсов `LVMVolumeGroup`. В быстром старте будем создавать обычное `Thick` хранилище. -Необходимо на этих узлах создать группы томов `LVM` с помощью пользовательских ресурсов `LVMVolumeGroup`. В быстром старте будем создавать обычное `Thin` хранилище. +> Пожалуйста, перед созданием `LVMVolumeGroup` убедитесь, что на данном узле запущен pod `sds-local-volume-csi-node`. Это можно сделать командой: +> +> ```shell +> kubectl -n d8-sds-local-volume get pod -owide +> ``` Приступим к настройке хранилища: @@ -90,128 +106,112 @@ moduleStatus: experimental - Создать ресурс [LVMVolumeGroup](../../sds-node-configurator/stable/cr.html#lvmvolumegroup) для узла `worker-0`: - ```yaml - kubectl apply -f - < 1 { - var msgBuilder strings.Builder - msgBuilder.WriteString(fmt.Sprintf("|%s: ", nodeName)) - for _, lvgName := range lvgs { - msgBuilder.WriteString(fmt.Sprintf("%s,", lvgName)) - } - - badLVGs = append(badLVGs, msgBuilder.String()) - } - } - - return badLVGs -} - -func recreateStorageClass(ctx context.Context, cl client.Client, oldSC, newSC *v1.StorageClass) error { - // It is necessary to pass the original StorageClass to the delete operation because - // the deletion will not succeed if the fields in the StorageClass provided to delete - // differ from those currently in the cluster. 
- err := deleteStorageClass(ctx, cl, oldSC) - if err != nil { - err = fmt.Errorf("[recreateStorageClass] unable to delete a storage class %s: %s", oldSC.Name, err.Error()) - return err - } - - err = cl.Create(ctx, newSC) - if err != nil { - err = fmt.Errorf("[recreateStorageClass] unable to create a storage class %s: %s", newSC.Name, err.Error()) - return err - } - - return nil -} - -func deleteStorageClass(ctx context.Context, cl client.Client, sc *v1.StorageClass) error { - if sc.Provisioner != LocalStorageClassProvisioner { - return fmt.Errorf("a storage class %s does not belong to %s provisioner", sc.Name, LocalStorageClassProvisioner) - } - - _, err := removeLocalSCFinalizerIfExistsForSC(ctx, cl, sc) - if err != nil { - return err - } - - err = cl.Delete(ctx, sc) - if err != nil { - return err - } - - return nil -} diff --git a/images/sds-local-volume-controller/pkg/controller/local_storage_class_watcher_func.go b/images/sds-local-volume-controller/pkg/controller/local_storage_class_watcher_func.go new file mode 100644 index 00000000..0db6dd46 --- /dev/null +++ b/images/sds-local-volume-controller/pkg/controller/local_storage_class_watcher_func.go @@ -0,0 +1,705 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + v1alpha1 "sds-local-volume-controller/api/v1alpha1" + "sds-local-volume-controller/pkg/logger" + "strings" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/strings/slices" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +func reconcileLSCDeleteFunc( + ctx context.Context, + cl client.Client, + log logger.Logger, + scList *v1.StorageClassList, + lsc *v1alpha1.LocalStorageClass, +) (bool, error) { + log.Debug(fmt.Sprintf("[reconcileLSCDeleteFunc] tries to find a storage class for the LocalStorageClass %s", lsc.Name)) + var sc *v1.StorageClass + for _, s := range scList.Items { + if s.Name == lsc.Name { + sc = &s + break + } + } + if sc == nil { + log.Info(fmt.Sprintf("[reconcileLSCDeleteFunc] no storage class found for the LocalStorageClass, name: %s", lsc.Name)) + } + + if sc != nil { + log.Info(fmt.Sprintf("[reconcileLSCDeleteFunc] successfully found a storage class for the LocalStorageClass %s", lsc.Name)) + log.Debug(fmt.Sprintf("[reconcileLSCDeleteFunc] starts identifing a provisioner for the storage class %s", sc.Name)) + + if sc.Provisioner != LocalStorageClassProvisioner { + log.Info(fmt.Sprintf("[reconcileLSCDeleteFunc] the storage class %s does not belongs to %s provisioner. It will not be deleted", sc.Name, LocalStorageClassProvisioner)) + } else { + log.Info(fmt.Sprintf("[reconcileLSCDeleteFunc] the storage class %s belongs to %s provisioner. 
It will be deleted", sc.Name, LocalStorageClassProvisioner)) + + err := deleteStorageClass(ctx, cl, sc) + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCDeleteFunc] unable to delete a storage class, name: %s", sc.Name)) + upErr := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, fmt.Sprintf("Unable to delete a storage class, err: %s", err.Error())) + if upErr != nil { + log.Error(upErr, fmt.Sprintf("[reconcileLSCDeleteFunc] unable to update the LocalStorageClass, name: %s", lsc.Name)) + } + return true, err + } + log.Info(fmt.Sprintf("[reconcileLSCDeleteFunc] successfully deleted a storage class, name: %s", sc.Name)) + } + } + + log.Debug(fmt.Sprintf("[reconcileLSCDeleteFunc] starts removing a finalizer %s from the LocalStorageClass, name: %s", LocalStorageClassFinalizerName, lsc.Name)) + removed, err := removeFinalizerIfExists(ctx, cl, lsc, LocalStorageClassFinalizerName) + if err != nil { + log.Error(err, "[reconcileLSCDeleteFunc] unable to remove a finalizer %s from the LocalStorageClass, name: %s", LocalStorageClassFinalizerName, lsc.Name) + upErr := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, fmt.Sprintf("Unable to remove a finalizer, err: %s", err.Error())) + if upErr != nil { + log.Error(upErr, fmt.Sprintf("[reconcileLSCDeleteFunc] unable to update the LocalStorageClass, name: %s", lsc.Name)) + } + return true, err + } + log.Debug(fmt.Sprintf("[reconcileLSCDeleteFunc] the LocalStorageClass %s finalizer %s was removed: %t", lsc.Name, LocalStorageClassFinalizerName, removed)) + + log.Debug("[reconcileLSCDeleteFunc] ends the reconciliation") + return false, nil +} + +func reconcileLSCUpdateFunc( + ctx context.Context, + cl client.Client, + log logger.Logger, + scList *v1.StorageClassList, + lsc *v1alpha1.LocalStorageClass, +) (bool, error) { + log.Debug(fmt.Sprintf("[reconcileLSCUpdateFunc] starts the LocalStorageClass %s validation", lsc.Name)) + valid, msg := validateLocalStorageClass(ctx, cl, scList, lsc) + if !valid { + err := fmt.Errorf("validation failed: %s", msg) + log.Error(err, fmt.Sprintf("[reconcileLSCUpdateFunc] Unable to reconcile the LocalStorageClass, name: %s", lsc.Name)) + upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, msg) + if upError != nil { + log.Error(upError, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to update the LocalStorageClass %s", lsc.Name)) + } + + return true, err + } + log.Debug(fmt.Sprintf("[reconcileLSCUpdateFunc] successfully validated the LocalStorageClass, name: %s", lsc.Name)) + + var oldSC *v1.StorageClass + for _, s := range scList.Items { + if s.Name == lsc.Name { + oldSC = &s + break + } + } + if oldSC == nil { + err := fmt.Errorf("a storage class %s does not exist", lsc.Name) + log.Error(err, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to find a storage class for the LocalStorageClass, name: %s", lsc.Name)) + upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, err.Error()) + if upError != nil { + log.Error(upError, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to update the LocalStorageClass %s", lsc.Name)) + } + return true, err + } + + log.Debug(fmt.Sprintf("[reconcileLSCUpdateFunc] successfully found a storage class for the LocalStorageClass, name: %s", lsc.Name)) + + log.Trace(fmt.Sprintf("[reconcileLSCUpdateFunc] storage class %s params: %+v", oldSC.Name, oldSC.Parameters)) + log.Trace(fmt.Sprintf("[reconcileLSCUpdateFunc] LocalStorageClass %s Spec.LVM: %+v", lsc.Name, lsc.Spec.LVM)) + hasDiff, err := hasLVGDiff(oldSC, lsc) + if err != nil { + 
log.Error(err, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to identify the LVMVolumeGroup difference for the LocalStorageClass %s", lsc.Name)) + upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, err.Error()) + if upError != nil { + log.Error(upError, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to update the LocalStorageClass %s", lsc.Name)) + } + return true, err + } + + if hasDiff { + log.Info(fmt.Sprintf("[reconcileLSCUpdateFunc] current Storage Class LVMVolumeGroups do not match LocalStorageClass ones. The Storage Class %s will be recreated with new ones", lsc.Name)) + newSC, err := configureStorageClass(lsc) + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to configure a Storage Class for the LocalStorageClass %s", lsc.Name)) + upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, err.Error()) + if upError != nil { + log.Error(upError, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to update the LocalStorageClass %s", lsc.Name)) + return true, upError + } + return false, err + } + + err = recreateStorageClass(ctx, cl, oldSC, newSC) + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to recreate a Storage Class %s", newSC.Name)) + upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, err.Error()) + if upError != nil { + log.Error(upError, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to update the LocalStorageClass %s", lsc.Name)) + } + return true, err + } + + log.Info(fmt.Sprintf("[reconcileLSCUpdateFunc] a Storage Class %s was successfully recreated", newSC.Name)) + } + + err = updateLocalStorageClassPhase(ctx, cl, lsc, CreatedStatusPhase, "") + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCUpdateFunc] unable to update the LocalStorageClass, name: %s", lsc.Name)) + return true, err + } + log.Debug(fmt.Sprintf("[reconcileLSCUpdateFunc] successfully updated the LocalStorageClass %s status", lsc.Name)) + + return false, nil +} + +func identifyReconcileFunc(scList *v1.StorageClassList, lsc *v1alpha1.LocalStorageClass) (reconcileType, error) { + if shouldReconcileByDeleteFunc(lsc) { + return DeleteReconcile, nil + } + + if shouldReconcileByCreateFunc(scList, lsc) { + return CreateReconcile, nil + } + + should, err := shouldReconcileByUpdateFunc(scList, lsc) + if err != nil { + return "none", err + } + if should { + return UpdateReconcile, nil + } + + return "none", nil +} + +func shouldReconcileByDeleteFunc(lsc *v1alpha1.LocalStorageClass) bool { + if lsc.DeletionTimestamp != nil { + return true + } + + return false +} + +func shouldReconcileByUpdateFunc(scList *v1.StorageClassList, lsc *v1alpha1.LocalStorageClass) (bool, error) { + if lsc.DeletionTimestamp != nil { + return false, nil + } + + for _, sc := range scList.Items { + if sc.Name == lsc.Name { + if sc.Provisioner == LocalStorageClassProvisioner { + diff, err := hasLVGDiff(&sc, lsc) + if err != nil { + return false, err + } + + if diff { + return true, nil + } + + if lsc.Status.Phase == FailedStatusPhase { + return true, nil + } + + return false, nil + + } else { + err := fmt.Errorf("a storage class %s already exists and does not belong to %s provisioner", sc.Name, LocalStorageClassProvisioner) + return false, err + } + } + } + + err := fmt.Errorf("a storage class %s does not exist", lsc.Name) + return false, err + +} + +func hasLVGDiff(sc *v1.StorageClass, lsc *v1alpha1.LocalStorageClass) (bool, error) { + currentLVGs, err := getLVGFromSCParams(sc) + if err != nil { + return false, err + } + + if 
len(currentLVGs) != len(lsc.Spec.LVM.LVMVolumeGroups) { + return true, nil + } + + for i := range currentLVGs { + if currentLVGs[i].Name != lsc.Spec.LVM.LVMVolumeGroups[i].Name { + return true, nil + } + if lsc.Spec.LVM.Type == LVMThinType { + if currentLVGs[i].Thin == nil && lsc.Spec.LVM.LVMVolumeGroups[i].Thin != nil { + return true, nil + } + if currentLVGs[i].Thin == nil && lsc.Spec.LVM.LVMVolumeGroups[i].Thin == nil { + err := fmt.Errorf("LocalStorageClass type=%q: unable to identify the Thin pool differences for the LocalStorageClass %q. The current LVMVolumeGroup %q does not have a Thin pool configured in either the StorageClass or the LocalStorageClass", lsc.Spec.LVM.Type, lsc.Name, currentLVGs[i].Name) + return false, err + } + if currentLVGs[i].Thin.PoolName != lsc.Spec.LVM.LVMVolumeGroups[i].Thin.PoolName { + return true, nil + } + } + } + + return false, nil +} + +func getLVGFromSCParams(sc *v1.StorageClass) ([]v1alpha1.LocalStorageClassLVG, error) { + lvgsFromParams := sc.Parameters[LVMVolumeGroupsParamKey] + var currentLVGs []v1alpha1.LocalStorageClassLVG + + err := yaml.Unmarshal([]byte(lvgsFromParams), ¤tLVGs) + if err != nil { + return nil, err + } + + return currentLVGs, nil +} + +func shouldReconcileByCreateFunc(scList *v1.StorageClassList, lsc *v1alpha1.LocalStorageClass) bool { + if lsc.DeletionTimestamp != nil { + return false + } + + for _, sc := range scList.Items { + if sc.Name == lsc.Name { + return false + } + } + + return true +} + +func reconcileLSCCreateFunc( + ctx context.Context, + cl client.Client, + log logger.Logger, + scList *v1.StorageClassList, + lsc *v1alpha1.LocalStorageClass, +) (bool, error) { + log.Debug(fmt.Sprintf("[reconcileLSCCreateFunc] starts the LocalStorageClass %s validation", lsc.Name)) + added, err := addFinalizerIfNotExistsForLSC(ctx, cl, lsc) + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCCreateFunc] unable to add a finalizer %s to the LocalStorageClass %s", LocalStorageClassFinalizerName, lsc.Name)) + return true, err + } + log.Debug(fmt.Sprintf("[reconcileLSCCreateFunc] finalizer %s was added to the LocalStorageClass %s: %t", LocalStorageClassFinalizerName, lsc.Name, added)) + + valid, msg := validateLocalStorageClass(ctx, cl, scList, lsc) + if !valid { + err := fmt.Errorf("validation failed: %s", msg) + log.Error(err, fmt.Sprintf("[reconcileLSCCreateFunc] Unable to reconcile the LocalStorageClass, name: %s", lsc.Name)) + upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, msg) + if upError != nil { + log.Error(upError, fmt.Sprintf("[reconcileLSCCreateFunc] unable to update the LocalStorageClass %s", lsc.Name)) + } + + return true, err + } + log.Debug(fmt.Sprintf("[reconcileLSCCreateFunc] successfully validated the LocalStorageClass, name: %s", lsc.Name)) + + log.Debug(fmt.Sprintf("[reconcileLSCCreateFunc] starts storage class configuration for the LocalStorageClass, name: %s", lsc.Name)) + sc, err := configureStorageClass(lsc) + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCCreateFunc] unable to configure Storage Class for LocalStorageClass, name: %s", lsc.Name)) + upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, err.Error()) + if upError != nil { + log.Error(upError, fmt.Sprintf("[reconcileLSCCreateFunc] unable to update the LocalStorageClass %s", lsc.Name)) + return true, upError + } + return false, err + } + log.Debug(fmt.Sprintf("[reconcileLSCCreateFunc] successfully configurated storage class for the LocalStorageClass, name: %s", lsc.Name)) + + created, 
err := createStorageClassIfNotExists(ctx, cl, scList, sc) + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCCreateFunc] unable to create a Storage Class, name: %s", sc.Name)) + upError := updateLocalStorageClassPhase(ctx, cl, lsc, FailedStatusPhase, err.Error()) + if upError != nil { + log.Error(upError, fmt.Sprintf("[reconcileLSCCreateFunc] unable to update the LocalStorageClass %s", lsc.Name)) + return true, upError + } + return true, err + } + log.Debug(fmt.Sprintf("[reconcileLSCCreateFunc] a storage class %s was created: %t", sc.Name, created)) + if created { + log.Info(fmt.Sprintf("[reconcileLSCCreateFunc] successfully create storage class, name: %s", sc.Name)) + } else { + log.Warning(fmt.Sprintf("[reconcileLSCCreateFunc] Storage class %s already exists. Adding event to requeue.", sc.Name)) + return true, nil + } + + added, err = addFinalizerIfNotExistsForSC(ctx, cl, sc) + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCCreateFunc] unable to add a finalizer %s to the StorageClass %s", LocalStorageClassFinalizerName, sc.Name)) + return true, err + } + log.Debug(fmt.Sprintf("[reconcileLSCCreateFunc] finalizer %s was added to the StorageClass %s: %t", LocalStorageClassFinalizerName, sc.Name, added)) + + err = updateLocalStorageClassPhase(ctx, cl, lsc, CreatedStatusPhase, "") + if err != nil { + log.Error(err, fmt.Sprintf("[reconcileLSCCreateFunc] unable to update the LocalStorageClass, name: %s", lsc.Name)) + return true, err + } + log.Debug(fmt.Sprintf("[reconcileLSCCreateFunc] successfully updated the LocalStorageClass %s status", sc.Name)) + + return false, nil +} + +func createStorageClassIfNotExists( + ctx context.Context, + cl client.Client, + scList *v1.StorageClassList, + sc *v1.StorageClass, +) (bool, error) { + for _, s := range scList.Items { + if s.Name == sc.Name { + return false, nil + } + } + + err := cl.Create(ctx, sc) + if err != nil { + return false, err + } + + return true, err +} + +func addFinalizerIfNotExistsForLSC(ctx context.Context, cl client.Client, lsc *v1alpha1.LocalStorageClass) (bool, error) { + if !slices.Contains(lsc.Finalizers, LocalStorageClassFinalizerName) { + lsc.Finalizers = append(lsc.Finalizers, LocalStorageClassFinalizerName) + } + + err := cl.Update(ctx, lsc) + if err != nil { + return false, err + } + + return true, nil +} + +func addFinalizerIfNotExistsForSC(ctx context.Context, cl client.Client, sc *v1.StorageClass) (bool, error) { + if !slices.Contains(sc.Finalizers, LocalStorageClassFinalizerName) { + sc.Finalizers = append(sc.Finalizers, LocalStorageClassFinalizerName) + } + + err := cl.Update(ctx, sc) + if err != nil { + return false, err + } + + return true, nil +} + +func configureStorageClass(lsc *v1alpha1.LocalStorageClass) (*v1.StorageClass, error) { + reclaimPolicy := corev1.PersistentVolumeReclaimPolicy(lsc.Spec.ReclaimPolicy) + volumeBindingMode := v1.VolumeBindingMode(lsc.Spec.VolumeBindingMode) + AllowVolumeExpansion := AllowVolumeExpansionDefaultValue + + if lsc.Spec.LVM == nil { + //TODO: add support for other LSC types + return nil, fmt.Errorf("unable to identify the LocalStorageClass type") + } + + lvgsParam, err := yaml.Marshal(lsc.Spec.LVM.LVMVolumeGroups) + if err != nil { + return nil, err + } + + params := map[string]string{ + TypeParamKey: LocalStorageClassLvmType, + LVMTypeParamKey: lsc.Spec.LVM.Type, + LVMVolumeBindingModeParamKey: lsc.Spec.VolumeBindingMode, + LVMVolumeGroupsParamKey: string(lvgsParam), + } + + sc := &v1.StorageClass{ + TypeMeta: metav1.TypeMeta{ + Kind: StorageClassKind, + 
APIVersion: StorageClassAPIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: lsc.Name, + Namespace: lsc.Namespace, + Finalizers: []string{LocalStorageClassFinalizerName}, + }, + Provisioner: LocalStorageClassProvisioner, + Parameters: params, + ReclaimPolicy: &reclaimPolicy, + AllowVolumeExpansion: &AllowVolumeExpansion, + VolumeBindingMode: &volumeBindingMode, + } + + return sc, nil +} + +func updateLocalStorageClassPhase( + ctx context.Context, + cl client.Client, + lsc *v1alpha1.LocalStorageClass, + phase, + reason string, +) error { + if lsc.Status == nil { + lsc.Status = new(v1alpha1.LocalStorageClassStatus) + } + lsc.Status.Phase = phase + lsc.Status.Reason = reason + + if !slices.Contains(lsc.Finalizers, LocalStorageClassFinalizerName) { + lsc.Finalizers = append(lsc.Finalizers, LocalStorageClassFinalizerName) + } + + // TODO: add retry logic + err := cl.Update(ctx, lsc) + if err != nil { + return err + } + + return nil +} + +func validateLocalStorageClass( + ctx context.Context, + cl client.Client, + scList *v1.StorageClassList, + lsc *v1alpha1.LocalStorageClass, +) (bool, string) { + var ( + failedMsgBuilder strings.Builder + valid = true + ) + + unmanagedScName := findUnmanagedDuplicatedSC(scList, lsc) + if unmanagedScName != "" { + valid = false + failedMsgBuilder.WriteString(fmt.Sprintf("There already is a storage class with the same name: %s but it is not managed by the LocalStorageClass controller\n", unmanagedScName)) + } + + lvgList := &v1alpha1.LvmVolumeGroupList{} + err := cl.List(ctx, lvgList) + if err != nil { + valid = false + failedMsgBuilder.WriteString(fmt.Sprintf("Unable to validate selected LVMVolumeGroups, err: %s\n", err.Error())) + return valid, failedMsgBuilder.String() + } + + if lsc.Spec.LVM != nil { + LVGsFromTheSameNode := findLVMVolumeGroupsOnTheSameNode(lvgList, lsc) + if len(LVGsFromTheSameNode) != 0 { + valid = false + failedMsgBuilder.WriteString(fmt.Sprintf("Some LVMVolumeGroups use the same node (|node: LVG names): %s\n", strings.Join(LVGsFromTheSameNode, ""))) + } + + nonexistentLVGs := findNonexistentLVGs(lvgList, lsc) + if len(nonexistentLVGs) != 0 { + valid = false + failedMsgBuilder.WriteString(fmt.Sprintf("Some of selected LVMVolumeGroups are nonexistent, LVG names: %s\n", strings.Join(nonexistentLVGs, ","))) + } + + if lsc.Spec.LVM.Type == LVMThinType { + LVGSWithNonexistentTps := findNonexistentThinPools(lvgList, lsc) + if len(LVGSWithNonexistentTps) != 0 { + valid = false + failedMsgBuilder.WriteString(fmt.Sprintf("Some LVMVolumeGroups use nonexistent thin pools, LVG names: %s\n", strings.Join(LVGSWithNonexistentTps, ","))) + } + } else { + LVGsWithTps := findAnyThinPool(lsc) + if len(LVGsWithTps) != 0 { + valid = false + failedMsgBuilder.WriteString(fmt.Sprintf("Some LVMVolumeGroups use thin pools though device type is Thick, LVG names: %s\n", strings.Join(LVGsWithTps, ","))) + } + } + } else { + // TODO: add support for other types + valid = false + failedMsgBuilder.WriteString(fmt.Sprintf("Unable to identify a type of LocalStorageClass %s", lsc.Name)) + } + + return valid, failedMsgBuilder.String() +} + +func findUnmanagedDuplicatedSC(scList *v1.StorageClassList, lsc *v1alpha1.LocalStorageClass) string { + for _, sc := range scList.Items { + if sc.Name == lsc.Name && sc.Provisioner != LocalStorageClassProvisioner { + return sc.Name + } + } + + return "" +} + +func findAnyThinPool(lsc *v1alpha1.LocalStorageClass) []string { + badLvgs := make([]string, 0, len(lsc.Spec.LVM.LVMVolumeGroups)) + for _, lvs := range 
lsc.Spec.LVM.LVMVolumeGroups { + if lvs.Thin != nil { + badLvgs = append(badLvgs, lvs.Name) + } + } + + return badLvgs +} + +func findNonexistentThinPools(lvgList *v1alpha1.LvmVolumeGroupList, lsc *v1alpha1.LocalStorageClass) []string { + lvgs := make(map[string]v1alpha1.LvmVolumeGroup, len(lvgList.Items)) + for _, lvg := range lvgList.Items { + lvgs[lvg.Name] = lvg + } + + badLvgs := make([]string, 0, len(lsc.Spec.LVM.LVMVolumeGroups)) + for _, lscLvg := range lsc.Spec.LVM.LVMVolumeGroups { + if lscLvg.Thin == nil { + badLvgs = append(badLvgs, lscLvg.Name) + continue + } + + lvgRes := lvgs[lscLvg.Name] + exist := false + + for _, tp := range lvgRes.Status.ThinPools { + if tp.Name == lscLvg.Thin.PoolName { + exist = true + break + } + } + + if !exist { + badLvgs = append(badLvgs, lscLvg.Name) + } + } + + return badLvgs +} + +func findNonexistentLVGs(lvgList *v1alpha1.LvmVolumeGroupList, lsc *v1alpha1.LocalStorageClass) []string { + lvgs := make(map[string]struct{}, len(lvgList.Items)) + for _, lvg := range lvgList.Items { + lvgs[lvg.Name] = struct{}{} + } + + nonexistent := make([]string, 0, len(lsc.Spec.LVM.LVMVolumeGroups)) + for _, lvg := range lsc.Spec.LVM.LVMVolumeGroups { + if _, exist := lvgs[lvg.Name]; !exist { + nonexistent = append(nonexistent, lvg.Name) + } + } + + return nonexistent +} + +func findLVMVolumeGroupsOnTheSameNode(lvgList *v1alpha1.LvmVolumeGroupList, lsc *v1alpha1.LocalStorageClass) []string { + nodesWithLVGs := make(map[string][]string, len(lsc.Spec.LVM.LVMVolumeGroups)) + usedLVGs := make(map[string]struct{}, len(lsc.Spec.LVM.LVMVolumeGroups)) + for _, lvg := range lsc.Spec.LVM.LVMVolumeGroups { + usedLVGs[lvg.Name] = struct{}{} + } + + badLVGs := make([]string, 0, len(lsc.Spec.LVM.LVMVolumeGroups)) + for _, lvg := range lvgList.Items { + if _, used := usedLVGs[lvg.Name]; used { + for _, node := range lvg.Status.Nodes { + nodesWithLVGs[node.Name] = append(nodesWithLVGs[node.Name], lvg.Name) + } + } + } + + for nodeName, lvgs := range nodesWithLVGs { + if len(lvgs) > 1 { + var msgBuilder strings.Builder + msgBuilder.WriteString(fmt.Sprintf("|%s: ", nodeName)) + for _, lvgName := range lvgs { + msgBuilder.WriteString(fmt.Sprintf("%s,", lvgName)) + } + + badLVGs = append(badLVGs, msgBuilder.String()) + } + } + + return badLVGs +} + +func recreateStorageClass(ctx context.Context, cl client.Client, oldSC, newSC *v1.StorageClass) error { + // It is necessary to pass the original StorageClass to the delete operation because + // the deletion will not succeed if the fields in the StorageClass provided to delete + // differ from those currently in the cluster. 
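+	// Note: the delete-then-create sequence below is not atomic. If Create fails after the delete
+	// has succeeded, the StorageClass stays absent until a later reconcile recreates it.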
+ err := deleteStorageClass(ctx, cl, oldSC) + if err != nil { + err = fmt.Errorf("[recreateStorageClass] unable to delete a storage class %s: %s", oldSC.Name, err.Error()) + return err + } + + err = cl.Create(ctx, newSC) + if err != nil { + err = fmt.Errorf("[recreateStorageClass] unable to create a storage class %s: %s", newSC.Name, err.Error()) + return err + } + + return nil +} + +func deleteStorageClass(ctx context.Context, cl client.Client, sc *v1.StorageClass) error { + if sc.Provisioner != LocalStorageClassProvisioner { + return fmt.Errorf("a storage class %s does not belong to %s provisioner", sc.Name, LocalStorageClassProvisioner) + } + + _, err := removeFinalizerIfExists(ctx, cl, sc, LocalStorageClassFinalizerName) + if err != nil { + return err + } + + err = cl.Delete(ctx, sc) + if err != nil { + return err + } + + return nil +} + +func removeFinalizerIfExists(ctx context.Context, cl client.Client, obj metav1.Object, finalizerName string) (bool, error) { + removed := false + finalizers := obj.GetFinalizers() + for i, f := range finalizers { + if f == finalizerName { + finalizers = append(finalizers[:i], finalizers[i+1:]...) + removed = true + break + } + } + + if removed { + obj.SetFinalizers(finalizers) + err := cl.Update(ctx, obj.(client.Object)) + if err != nil { + return false, err + } + } + + return removed, nil +} diff --git a/images/sds-local-volume-controller/pkg/controller/local_storage_class_watcher_test.go b/images/sds-local-volume-controller/pkg/controller/local_storage_class_watcher_test.go new file mode 100644 index 00000000..4a510703 --- /dev/null +++ b/images/sds-local-volume-controller/pkg/controller/local_storage_class_watcher_test.go @@ -0,0 +1,814 @@ +/* +Copyright 2023 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + "context" + v1alpha1 "sds-local-volume-controller/api/v1alpha1" + "sds-local-volume-controller/pkg/controller" + "sds-local-volume-controller/pkg/logger" + "slices" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/storage/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe(controller.LocalStorageClassCtrlName, func() { + const ( + controllerNamespace = "test-namespace" + nameForLocalStorageClass = "sds-local-volume-storage-class" + + existingThickLVG1Name = "test-thick-vg1" + existingThickLVG2Name = "test-thick-vg2" + newThickLVGName = "test-thick-vg3-new" + + existingThinLVG1Name = "test-thin-vg1" + existingThinLVG2Name = "test-thin-vg2" + newThinLVGName = "test-thin-vg3-new" + + nonExistentLVG1Name = "test-vg4-non-existent" + nonExistentLVG2Name = "test-vg5-non-existent" + ) + + var ( + ctx = context.Background() + cl = NewFakeClient() + log = logger.Logger{} + + reclaimPolicyDelete = string(corev1.PersistentVolumeReclaimDelete) + reclaimPolicyRetain = string(corev1.PersistentVolumeReclaimRetain) + + volumeBindingModeWFFC = string(v1.VolumeBindingWaitForFirstConsumer) + volumeBindingModeIM = string(v1.VolumeBindingImmediate) + + existingThickLVG1Template = generateLVMVolumeGroup(existingThickLVG1Name, []string{"dev-1111", "dev-2222"}, []string{}) + existingThickLVG2Template = generateLVMVolumeGroup(existingThickLVG2Name, []string{"dev-3333", "dev-4444"}, []string{}) + newThickLVGTemplate = generateLVMVolumeGroup(newThickLVGName, []string{"dev-5555", "dev-6666"}, []string{}) + + existingThinLVG1Template = generateLVMVolumeGroup(existingThinLVG1Name, []string{"dev-7777", "dev-8888"}, []string{"thin-pool-1", "thin-pool-2"}) + existingThinLVG2Template = generateLVMVolumeGroup(existingThinLVG2Name, []string{"dev-9999", "dev-1010"}, []string{"thin-pool-1", "thin-pool-2"}) + newThinLVGTemplate = generateLVMVolumeGroup(newThinLVGName, []string{"dev-1111", "dev-1212"}, []string{"thin-pool-1", "thin-pool-2"}) + ) + + It("Create_local_sc_with_existing_lvgs", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + } + + err := cl.Create(ctx, existingThickLVG1Template) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Create(ctx, existingThickLVG2Template) + Expect(err).NotTo(HaveOccurred()) + + lscTemplate := generateLocalStorageClass(nameForLocalStorageClass, reclaimPolicyDelete, volumeBindingModeWFFC, controller.LVMThickType, lvgSpec) + + err = cl.Create(ctx, lscTemplate) + Expect(err).NotTo(HaveOccurred()) + + lsc := &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + Expect(lsc).NotTo(BeNil()) + Expect(lsc.Name).To(Equal(nameForLocalStorageClass)) + Expect(lsc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, 
controller.LVMThickType, reclaimPolicyDelete, volumeBindingModeWFFC) + }) + + It("Update_local_sc_add_existing_lvg", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + {Name: newThickLVGName}, + } + + err := cl.Create(ctx, newThickLVGTemplate) + Expect(err).NotTo(HaveOccurred()) + + lsc := &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc.Spec.LVM.LVMVolumeGroups = append(lsc.Spec.LVM.LVMVolumeGroups, v1alpha1.LocalStorageClassLVG{Name: newThickLVGName}) + + err = cl.Update(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + Expect(lsc.Status.Phase).To(Equal(controller.CreatedStatusPhase)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThickType, reclaimPolicyDelete, volumeBindingModeWFFC) + }) + + It("Update_local_sc_remove_existing_lvg", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc.Spec.LVM.LVMVolumeGroups = delFromSlice(lsc.Spec.LVM.LVMVolumeGroups, newThickLVGName) + + err = cl.Update(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + Expect(lsc.Status.Phase).To(Equal(controller.CreatedStatusPhase)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThickType, reclaimPolicyDelete, volumeBindingModeWFFC) + }) + + It("Update_local_sc_add_non_existing_lvg", func() { + lvgSpecOld := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + } + + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + {Name: nonExistentLVG1Name}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + 
lsc.Spec.LVM.LVMVolumeGroups = append(lsc.Spec.LVM.LVMVolumeGroups, v1alpha1.LocalStorageClassLVG{Name: nonExistentLVG1Name}) + + err = cl.Update(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + Expect(lsc.Status.Phase).To(Equal(controller.FailedStatusPhase)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpecOld, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThickType, reclaimPolicyDelete, volumeBindingModeWFFC) + }) + + It("Remove_local_sc_with_non_existing_lvg", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + {Name: nonExistentLVG1Name}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + + err = cl.Delete(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc = &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + Expect(scList.Items).To(HaveLen(1)) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_local_sc_with_non_existing_lvgs", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: nonExistentLVG1Name}, + {Name: nonExistentLVG2Name}, + } + + lscTemplate := generateLocalStorageClass(nameForLocalStorageClass, reclaimPolicyDelete, volumeBindingModeWFFC, controller.LVMThickType, lvgSpec) + + err := cl.Create(ctx, lscTemplate) + Expect(err).NotTo(HaveOccurred()) + + lsc := &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + Expect(scList.Items).To(HaveLen(0)) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Status.Phase).To(Equal(controller.FailedStatusPhase)) + + sc := 
&v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + }) + + It("Update_local_sc_with_all_existing_lvgs", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc.Spec.LVM.LVMVolumeGroups = lvgSpec + + err = cl.Update(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + Expect(scList.Items).To(HaveLen(0)) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + Expect(lsc.Status.Phase).To(Equal(controller.CreatedStatusPhase)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThickType, reclaimPolicyDelete, volumeBindingModeWFFC) + }) + + It("Remove_local_sc_with_existing_lvgs", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc = &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + Expect(scList.Items).To(HaveLen(1)) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_local_sc_when_sc_with_another_provisioner_exists", func() { + sc := &v1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: nameForLocalStorageClass, + }, + Provisioner: "test-provisioner", + } + + err := cl.Create(ctx, sc) + Expect(err).NotTo(HaveOccurred()) + + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + } + + lscTemplate := generateLocalStorageClass(nameForLocalStorageClass, reclaimPolicyDelete, volumeBindingModeWFFC, controller.LVMThickType, lvgSpec) + + err = cl.Create(ctx, lscTemplate) + Expect(err).NotTo(HaveOccurred()) + + lsc := &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, 
client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + Expect(scList.Items).To(HaveLen(1)) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Status.Phase).To(Equal(controller.FailedStatusPhase)) + + sc = &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + }) + + It("Update_local_sc_add_existing_vg_when_sc_with_another_provisioner_exists", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + {Name: newThickLVGName}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc.Spec.LVM.LVMVolumeGroups = append(lsc.Spec.LVM.LVMVolumeGroups, v1alpha1.LocalStorageClassLVG{Name: newThickLVGName}) + + err = cl.Update(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + Expect(scList.Items).To(HaveLen(1)) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + Expect(lsc.Status.Phase).To(Equal(controller.FailedStatusPhase)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + }) + + It("Remove_local_sc_with_existing_vgs_when_sc_with_another_provisioner_exists", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThickLVG1Name}, + {Name: existingThickLVG2Name}, + {Name: newThickLVGName}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc = &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + Expect(scList.Items).To(HaveLen(1)) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: 
nameForLocalStorageClass}, lsc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + Expect(sc.Provisioner).To(Equal("test-provisioner")) + Expect(sc.Finalizers).To(HaveLen(0)) + + err = cl.Delete(ctx, sc) + Expect(err).NotTo(HaveOccurred()) + + sc = &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + + It("Create_local_thin_sc_with_existing_thin_lvgs", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThinLVG1Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-1"}}, + {Name: existingThinLVG2Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-2"}}, + } + + err := cl.Create(ctx, existingThinLVG1Template) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Create(ctx, existingThinLVG2Template) + Expect(err).NotTo(HaveOccurred()) + + lscTemplate := generateLocalStorageClass(nameForLocalStorageClass, reclaimPolicyRetain, volumeBindingModeIM, controller.LVMThinType, lvgSpec) + + err = cl.Create(ctx, lscTemplate) + Expect(err).NotTo(HaveOccurred()) + + lsc := &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + Expect(lsc).NotTo(BeNil()) + Expect(lsc.Name).To(Equal(nameForLocalStorageClass)) + Expect(lsc.Finalizers).To(HaveLen(0)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThinType, reclaimPolicyRetain, volumeBindingModeIM) + }) + + It("Update_local_thin_sc_add_existing_thin_lvg", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThinLVG1Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-1"}}, + {Name: existingThinLVG2Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-2"}}, + {Name: newThinLVGName, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-1"}}, + } + + err := cl.Create(ctx, newThinLVGTemplate) + Expect(err).NotTo(HaveOccurred()) + + lsc := &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc.Spec.LVM.LVMVolumeGroups = append(lsc.Spec.LVM.LVMVolumeGroups, v1alpha1.LocalStorageClassLVG{Name: newThinLVGName, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-1"}}) + + err = cl.Update(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: 
nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + Expect(lsc.Status.Phase).To(Equal(controller.CreatedStatusPhase)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThinType, reclaimPolicyRetain, volumeBindingModeIM) + }) + + It("Update_local_thin_sc_remove_existing_thin_lvg", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThinLVG1Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-1"}}, + {Name: existingThinLVG2Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-2"}}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc.Spec.LVM.LVMVolumeGroups = delFromSlice(lsc.Spec.LVM.LVMVolumeGroups, newThinLVGName) + + err = cl.Update(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + Expect(lsc.Status.Phase).To(Equal(controller.CreatedStatusPhase)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpec, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThinType, reclaimPolicyRetain, volumeBindingModeIM) + }) + + It("Update_local_thin_sc_add_existing_thick_lvg", func() { + lvgSpecOld := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThinLVG1Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-1"}}, + {Name: existingThinLVG2Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-2"}}, + } + + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThinLVG1Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-1"}}, + {Name: existingThinLVG2Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-2"}}, + {Name: existingThickLVG1Name}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc.Spec.LVM.LVMVolumeGroups = append(lsc.Spec.LVM.LVMVolumeGroups, v1alpha1.LocalStorageClassLVG{Name: existingThickLVG1Name}) + + err = cl.Update(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).To(HaveOccurred()) + Expect(shouldRequeue).To(BeTrue()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + 
Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + Expect(lsc.Status.Phase).To(Equal(controller.FailedStatusPhase)) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(err).NotTo(HaveOccurred()) + performStandartChecksForSC(sc, lvgSpecOld, nameForLocalStorageClass, controller.LocalStorageClassLvmType, controller.LVMThinType, reclaimPolicyRetain, volumeBindingModeIM) + }) + + It("Remove_local_thin_sc_with_existing_thick_lvg", func() { + lvgSpec := []v1alpha1.LocalStorageClassLVG{ + {Name: existingThinLVG1Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-1"}}, + {Name: existingThinLVG2Name, Thin: &v1alpha1.LocalStorageClassThinPool{PoolName: "thin-pool-2"}}, + {Name: existingThickLVG1Name}, + } + + lsc := &v1alpha1.LocalStorageClass{} + err := cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Delete(ctx, lsc) + Expect(err).NotTo(HaveOccurred()) + + lsc = &v1alpha1.LocalStorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(lsc.Finalizers).To(HaveLen(1)) + Expect(lsc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + Expect(lsc.Spec.LVM.LVMVolumeGroups).To(Equal(lvgSpec)) + + scList := &v1.StorageClassList{} + err = cl.List(ctx, scList) + Expect(err).NotTo(HaveOccurred()) + Expect(scList.Items).To(HaveLen(1)) + + shouldRequeue, err := controller.RunEventReconcile(ctx, cl, log, scList, lsc) + Expect(err).NotTo(HaveOccurred()) + Expect(shouldRequeue).To(BeFalse()) + + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, lsc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + + sc := &v1.StorageClass{} + err = cl.Get(ctx, client.ObjectKey{Name: nameForLocalStorageClass}, sc) + Expect(k8serrors.IsNotFound(err)).To(BeTrue()) + }) + +}) + +func generateLVMVolumeGroup(name string, devices, thinPoolNames []string) *v1alpha1.LvmVolumeGroup { + lvmType := controller.LVMThickType + + if len(thinPoolNames) > 0 { + lvmType = controller.LVMThinType + } + + thinPoolsSpec := make([]v1alpha1.SpecThinPool, 0) + thinPoolsStatus := make([]v1alpha1.StatusThinPool, 0) + for i := 0; i < len(thinPoolNames); i++ { + thinPoolsSpec = append(thinPoolsSpec, v1alpha1.SpecThinPool{ + Name: thinPoolNames[i], + Size: resource.MustParse("10Gi"), + }) + thinPoolsStatus = append(thinPoolsStatus, v1alpha1.StatusThinPool{ + Name: thinPoolNames[i], + ActualSize: resource.MustParse("10Gi"), + UsedSize: resource.MustParse("0Gi"), + }) + } + + return &v1alpha1.LvmVolumeGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1alpha1.LvmVolumeGroupSpec{ + ActualVGNameOnTheNode: "vg1", + BlockDeviceNames: devices, + ThinPools: thinPoolsSpec, + Type: lvmType, + }, + Status: v1alpha1.LvmVolumeGroupStatus{ + ThinPools: thinPoolsStatus, + }, + } +} + +func generateLocalStorageClass(lscName, reclaimPolicy, volumeBindingMode, lvmType string, lvgs []v1alpha1.LocalStorageClassLVG) *v1alpha1.LocalStorageClass { + + return &v1alpha1.LocalStorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: lscName, + }, + Spec: v1alpha1.LocalStorageClassSpec{ + ReclaimPolicy: reclaimPolicy, + VolumeBindingMode: volumeBindingMode, + LVM: &v1alpha1.LocalStorageClassLVM{ + Type: lvmType, + LVMVolumeGroups: lvgs, + }, + }, + } + +} + +func 
performStandartChecksForSC(sc *v1.StorageClass, lvgSpec []v1alpha1.LocalStorageClassLVG, nameForLocalStorageClass, LSCType, LVMType, reclaimPolicy, volumeBindingMode string) { + expectString := "" + for i, lvg := range lvgSpec { + if i != 0 { + expectString += "\n" + } + if lvg.Thin != nil { + expectString += "- name: " + lvg.Name + "\n thin:\n poolName: " + lvg.Thin.PoolName + } else { + expectString += "- name: " + lvg.Name + } + } + expectString += "\n" + + Expect(sc).NotTo(BeNil()) + Expect(sc.Name).To(Equal(nameForLocalStorageClass)) + Expect(sc.Finalizers).To(HaveLen(1)) + Expect(sc.Finalizers).To(ContainElement(controller.LocalStorageClassFinalizerName)) + + Expect(sc.Parameters).To(HaveLen(4)) + Expect(sc.Parameters).To(HaveKeyWithValue(controller.TypeParamKey, LSCType)) + Expect(sc.Parameters).To(HaveKeyWithValue(controller.LVMTypeParamKey, LVMType)) + Expect(sc.Parameters).To(HaveKeyWithValue(controller.LVMVolumeBindingModeParamKey, volumeBindingMode)) + Expect(sc.Parameters).To(HaveKey(controller.LVMVolumeGroupsParamKey)) + Expect(sc.Parameters[controller.LVMVolumeGroupsParamKey]).To(Equal(expectString)) + + Expect(sc.Provisioner).To(Equal(controller.LocalStorageClassProvisioner)) + Expect(string(*sc.ReclaimPolicy)).To(Equal(reclaimPolicy)) + Expect(string(*sc.VolumeBindingMode)).To(Equal(volumeBindingMode)) + Expect(*sc.AllowVolumeExpansion).To(BeTrue()) + +} + +func delFromSlice(slice []v1alpha1.LocalStorageClassLVG, name string) []v1alpha1.LocalStorageClassLVG { + for i, lvg := range slice { + if lvg.Name == name { + // return append(slice[:i], slice[i+1:]...) + return slices.Delete(slice, i, i+1) + } + } + return slice +} diff --git a/images/sds-local-volume-controller/pkg/monitoring/monitoring.go b/images/sds-local-volume-controller/pkg/monitoring/monitoring.go deleted file mode 100644 index a282d9fd..00000000 --- a/images/sds-local-volume-controller/pkg/monitoring/monitoring.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2024 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package monitoring - -import ( - "github.com/prometheus/client_golang/prometheus" - "k8s.io/utils/clock" - "sigs.k8s.io/controller-runtime/pkg/metrics" - "strings" - "time" -) - -const ( - namespace = "sds_node_configurator" -) - -var ( - reconcilesCountTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "reconciles_count_total", - Help: "Total number of times the resources were reconciled.", - }, []string{"node", "controller"}) - - reconcileDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Namespace: namespace, - Name: "reconcile_duration_seconds", - Help: "How long in seconds reconciling of resource takes.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, []string{"node", "controller"}) - - utilsCommandsDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Namespace: namespace, - Name: "custom_utils_commands_duration_seconds", - Help: "How long in seconds utils commands execution takes.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, []string{"node", "controller", "command"}) - - utilsCommandsExecutionCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "utils_commands_execution_count_total", - Help: "Total number of times the util-command was executed.", - }, []string{"node", "controller", "method"}) - - utilsCommandsErrorsCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "utils_commands_errors_count_total", - Help: "How many errors occurs during utils-command executions.", - }, []string{"node", "controller", "method"}) - - apiMethodsDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Namespace: namespace, - Name: "api_commands_duration_seconds", - Help: "How long in seconds kube-api methods execution takes.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, []string{"node", "controller", "method"}) - - apiMethodsExecutionCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "api_methods_execution_count_total", - Help: "Total number of times the method was executed.", - }, []string{"node", "controller", "method"}) - - apiMethodsErrorsCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "api_methods_errors_count_total", - Help: "How many errors occur during api-method executions.", - }, []string{"node", "controller", "method"}) - - noOperationalResourcesCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "no_operational_resources_count_total", - Help: "How many LVMVolumeGroup resources are in Nooperational state.", - }, []string{"resource"}) -) - -func init() { - metrics.Registry.MustRegister(reconcilesCountTotal) - metrics.Registry.MustRegister(reconcileDuration) - metrics.Registry.MustRegister(utilsCommandsDuration) - metrics.Registry.MustRegister(apiMethodsDuration) - metrics.Registry.MustRegister(apiMethodsExecutionCount) - metrics.Registry.MustRegister(apiMethodsErrorsCount) - metrics.Registry.MustRegister(noOperationalResourcesCount) -} - -type Metrics struct { - node string - c clock.Clock -} - -func GetMetrics(nodeName string) Metrics { - return Metrics{ - node: nodeName, - c: clock.RealClock{}, - } -} - -func (m Metrics) GetEstimatedTimeInSeconds(since time.Time) float64 { - return m.c.Since(since).Seconds() -} - -func (m Metrics) ReconcilesCountTotal(controllerName string) prometheus.Counter { - return reconcilesCountTotal.WithLabelValues(m.node, 
controllerName) -} - -func (m Metrics) ReconcileDuration(controllerName string) prometheus.Observer { - return reconcileDuration.WithLabelValues(m.node, controllerName) -} - -func (m Metrics) UtilsCommandsDuration(controllerName, command string) prometheus.Observer { - return utilsCommandsDuration.WithLabelValues(m.node, controllerName, strings.ToLower(command)) -} - -func (m Metrics) UtilsCommandsExecutionCount(controllerName, command string) prometheus.Counter { - return utilsCommandsExecutionCount.WithLabelValues(m.node, controllerName, strings.ToLower(command)) -} - -func (m Metrics) UtilsCommandsErrorsCount(controllerName, command string) prometheus.Counter { - return utilsCommandsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(command)) -} - -func (m Metrics) ApiMethodsDuration(controllerName, method string) prometheus.Observer { - return apiMethodsDuration.WithLabelValues(m.node, controllerName, strings.ToLower(method)) -} - -func (m Metrics) ApiMethodsExecutionCount(controllerName, method string) prometheus.Counter { - return apiMethodsExecutionCount.WithLabelValues(m.node, controllerName, strings.ToLower(method)) -} - -func (m Metrics) ApiMethodsErrors(controllerName, method string) prometheus.Counter { - return apiMethodsErrorsCount.WithLabelValues(m.node, controllerName, strings.ToLower(method)) -} - -func (m Metrics) NoOperationalResourcesCount(resourceName string) prometheus.Gauge { - return noOperationalResourcesCount.WithLabelValues(strings.ToLower(resourceName)) -} diff --git a/openapi/doc-ru-config-values.yaml b/openapi/doc-ru-config-values.yaml index c9019095..64b43efc 100644 --- a/openapi/doc-ru-config-values.yaml +++ b/openapi/doc-ru-config-values.yaml @@ -2,3 +2,13 @@ type: object properties: logLevel: description: Уровень логирования модуля. + dataNodes: + description: Настройки локальных томов csi на узлах с данными + properties: + nodeSelector: + description: | + То же, что и в параметре `spec.nodeSelector` подов в Kubernetes. + + Если параметр опущен, локальный том csi будет размещен на всех узлах. + + **Внимание!** Изменение этого параметра не приводит к перераспределению данных. Если узел с данными больше не соответствует «nodeSelector», данные на этом узле станут недоступными. \ No newline at end of file
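
Note on the new `dataNodes` setting documented in `openapi/doc-ru-config-values.yaml` above: `dataNodes.nodeSelector` follows the same semantics as a pod's `spec.nodeSelector`, and when it is omitted the CSI components are placed on every node. A minimal sketch of how this setting could be supplied — assuming the standard Deckhouse `ModuleConfig` resource and an illustrative label key/value that is not taken from this patch — might look like this:

```yaml
# Sketch only: the ModuleConfig wrapper and the label below are illustrative assumptions,
# not part of this patch. Only dataNodes.nodeSelector is defined by the module's config-values schema.
apiVersion: deckhouse.io/v1alpha1
kind: ModuleConfig
metadata:
  name: sds-local-volume
spec:
  enabled: true
  version: 1
  settings:
    dataNodes:
      nodeSelector:
        role.example.com/sds-local-volume: ""   # hypothetical label selecting the data nodes
```

As the description in the patch warns, changing this selector does not migrate existing data: if a node stops matching `nodeSelector`, the data already placed on that node becomes unavailable.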