From 2a1f05f89be88a465118883b1038a7cb075a4d25 Mon Sep 17 00:00:00 2001 From: Ethan Mosbaugh Date: Tue, 28 Jan 2025 14:40:31 -0800 Subject: [PATCH] feat(v2): upgrades without manager (#1755) * feat(v2): upgrades without manager * f * f * f * conditionally migrate --- kinds/apis/v1beta1/installation_types.go | 1 - .../controllers/installation_controller.go | 5 - operator/pkg/cli/migrate_v2.go | 42 +- operator/pkg/cli/migrate_v2_installmanager.go | 87 ---- operator/pkg/cli/migrate_v2_pod.go | 336 -------------- operator/pkg/cli/migrate_v2_pod_test.go | 316 -------------- operator/pkg/cli/migratev2/adminconsole.go | 146 ------- .../pkg/cli/migratev2/adminconsole_test.go | 370 ---------------- operator/pkg/cli/migratev2/installation.go | 119 ++--- .../pkg/cli/migratev2/installation_test.go | 319 ++++++++------ operator/pkg/cli/migratev2/k0s.go | 122 ++++++ operator/pkg/cli/migratev2/k0s_test.go | 111 +++++ operator/pkg/cli/migratev2/manager.go | 194 --------- operator/pkg/cli/migratev2/manager_test.go | 409 ------------------ operator/pkg/cli/migratev2/managerpod.go | 198 --------- operator/pkg/cli/migratev2/migrate.go | 65 +-- operator/pkg/cli/migratev2/operator.go | 149 +++---- operator/pkg/cli/migratev2/operator_test.go | 237 ++++------ operator/pkg/cli/upgrade.go | 24 - operator/pkg/cli/upgrade_job.go | 47 +- operator/pkg/upgrade/job.go | 10 - 21 files changed, 640 insertions(+), 2667 deletions(-) delete mode 100644 operator/pkg/cli/migrate_v2_installmanager.go delete mode 100644 operator/pkg/cli/migrate_v2_pod.go delete mode 100644 operator/pkg/cli/migrate_v2_pod_test.go delete mode 100644 operator/pkg/cli/migratev2/adminconsole.go delete mode 100644 operator/pkg/cli/migratev2/adminconsole_test.go create mode 100644 operator/pkg/cli/migratev2/k0s.go create mode 100644 operator/pkg/cli/migratev2/k0s_test.go delete mode 100644 operator/pkg/cli/migratev2/manager.go delete mode 100644 operator/pkg/cli/migratev2/manager_test.go delete mode 100644 operator/pkg/cli/migratev2/managerpod.go diff --git a/kinds/apis/v1beta1/installation_types.go b/kinds/apis/v1beta1/installation_types.go index 9402b1493..a7f2dcd6d 100644 --- a/kinds/apis/v1beta1/installation_types.go +++ b/kinds/apis/v1beta1/installation_types.go @@ -51,7 +51,6 @@ const ( const ( ConditionTypeV2MigrationInProgress = "V2MigrationInProgress" - ConditionTypeDisableReconcile = "DisableReconcile" ) // ConfigSecretEntryName holds the entry name we are looking for in the secret diff --git a/operator/controllers/installation_controller.go b/operator/controllers/installation_controller.go index 6f1507d10..14aa9cb6a 100644 --- a/operator/controllers/installation_controller.go +++ b/operator/controllers/installation_controller.go @@ -590,11 +590,6 @@ func (r *InstallationReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, nil } - if k8sutil.CheckConditionStatus(in.Status, v1beta1.ConditionTypeDisableReconcile) == metav1.ConditionTrue { - log.Info("Installation reconciliation is disabled, reconciliation ended") - return ctrl.Result{}, nil - } - // if this installation points to a cluster configuration living on // a secret we need to fetch this configuration before moving on. 
// at this stage we bail out with an error if we can't fetch or diff --git a/operator/pkg/cli/migrate_v2.go b/operator/pkg/cli/migrate_v2.go index 18d8614a2..05d48ae70 100644 --- a/operator/pkg/cli/migrate_v2.go +++ b/operator/pkg/cli/migrate_v2.go @@ -2,21 +2,17 @@ package cli import ( "fmt" - "io" "log" ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" "github.com/replicatedhq/embedded-cluster/operator/pkg/cli/migratev2" "github.com/replicatedhq/embedded-cluster/operator/pkg/k8sutil" - "github.com/replicatedhq/embedded-cluster/pkg/helm" - "github.com/replicatedhq/embedded-cluster/pkg/manager" - "github.com/replicatedhq/embedded-cluster/pkg/runtimeconfig" "github.com/spf13/cobra" ) // MigrateV2Cmd returns a cobra command for migrating the installation from v1 to v2. func MigrateV2Cmd() *cobra.Command { - var installationFile, migrationSecret, appSlug, appVersionLabel string + var installationFile string var installation *ecv1beta1.Installation @@ -31,12 +27,6 @@ func MigrateV2Cmd() *cobra.Command { } installation = in - // set the runtime config from the installation spec - // NOTE: this is run in a pod so the data dir is not available - runtimeconfig.Set(installation.Spec.RuntimeConfig) - - manager.SetServiceName(appSlug) - return nil }, RunE: func(cmd *cobra.Command, args []string) error { @@ -47,16 +37,7 @@ func MigrateV2Cmd() *cobra.Command { return fmt.Errorf("failed to create kubernetes client: %w", err) } - helmCLI, err := helm.NewHelm(helm.HelmOptions{ - Writer: io.Discard, - LogFn: log.Printf, - RESTClientGetterFactory: k8sutil.RESTClientGetterFactory, - }) - if err != nil { - return fmt.Errorf("failed to create helm client: %w", err) - } - - err = migratev2.Run(ctx, log.Printf, cli, helmCLI, installation, migrationSecret, appSlug, appVersionLabel) + err = migratev2.Run(ctx, log.Printf, cli, installation) if err != nil { return fmt.Errorf("failed to run v2 migration: %w", err) } @@ -70,25 +51,6 @@ func MigrateV2Cmd() *cobra.Command { if err != nil { panic(err) } - cmd.Flags().StringVar(&migrationSecret, "migrate-v2-secret", "", "The secret name from which to read the license") - err = cmd.MarkFlagRequired("migrate-v2-secret") - if err != nil { - panic(err) - } - cmd.Flags().StringVar(&appSlug, "app-slug", "", "The application slug") - err = cmd.MarkFlagRequired("app-slug") - if err != nil { - panic(err) - } - cmd.Flags().StringVar(&appVersionLabel, "app-version-label", "", "The application version label") - err = cmd.MarkFlagRequired("app-version-label") - if err != nil { - panic(err) - } - - cmd.AddCommand( - MigrateV2InstallManagerCmd(), - ) return cmd } diff --git a/operator/pkg/cli/migrate_v2_installmanager.go b/operator/pkg/cli/migrate_v2_installmanager.go deleted file mode 100644 index fa3f8b959..000000000 --- a/operator/pkg/cli/migrate_v2_installmanager.go +++ /dev/null @@ -1,87 +0,0 @@ -package cli - -import ( - "fmt" - - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/replicatedhq/embedded-cluster/operator/pkg/cli/migratev2" - "github.com/replicatedhq/embedded-cluster/pkg/helpers" - "github.com/replicatedhq/embedded-cluster/pkg/manager" - "github.com/replicatedhq/embedded-cluster/pkg/runtimeconfig" - kotsv1beta1 "github.com/replicatedhq/kotskinds/apis/kots/v1beta1" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -// MigrateV2InstallManagerCmd returns a cobra command run by the migrate-v2 command that is run in -// a pod on all nodes in the cluster. 
It will download the manager binary and install it as a -// systemd service on the host. -func MigrateV2InstallManagerCmd() *cobra.Command { - var installationFile, licenseFile, appSlug, appVersionLabel string - - var installation *ecv1beta1.Installation - var license *kotsv1beta1.License - - cmd := &cobra.Command{ - Use: "install-manager", - Short: "Downloads the v2 manager binary and installs it as a systemd service.", - PreRunE: func(cmd *cobra.Command, args []string) error { - logrus.SetLevel(logrus.DebugLevel) - - in, err := getInstallationFromFile(installationFile) - if err != nil { - return fmt.Errorf("failed to get installation from file: %w", err) - } - installation = in - - li, err := helpers.ParseLicense(licenseFile) - if err != nil { - return fmt.Errorf("failed to get license from file: %w", err) - } - license = li - - // set the runtime config from the installation spec - runtimeconfig.Set(installation.Spec.RuntimeConfig) - - manager.SetServiceName(appSlug) - - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() - - err := migratev2.InstallAndStartManager( - ctx, - license.Spec.LicenseID, license.Spec.Endpoint, appVersionLabel, - ) - if err != nil { - return fmt.Errorf("failed to run manager migration: %w", err) - } - - return nil - }, - } - - cmd.Flags().StringVar(&installationFile, "installation", "", "Path to the installation file") - err := cmd.MarkFlagRequired("installation") - if err != nil { - panic(err) - } - cmd.Flags().StringVar(&licenseFile, "license", "", "Path to the license file") - err = cmd.MarkFlagRequired("license") - if err != nil { - panic(err) - } - cmd.Flags().StringVar(&appSlug, "app-slug", "", "The application slug") - err = cmd.MarkFlagRequired("app-slug") - if err != nil { - panic(err) - } - cmd.Flags().StringVar(&appVersionLabel, "app-version-label", "", "The application version label") - err = cmd.MarkFlagRequired("app-version-label") - if err != nil { - panic(err) - } - - return cmd -} diff --git a/operator/pkg/cli/migrate_v2_pod.go b/operator/pkg/cli/migrate_v2_pod.go deleted file mode 100644 index af2325efb..000000000 --- a/operator/pkg/cli/migrate_v2_pod.go +++ /dev/null @@ -1,336 +0,0 @@ -package cli - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "time" - - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/replicatedhq/embedded-cluster/operator/pkg/metadata" - "github.com/replicatedhq/embedded-cluster/operator/pkg/release" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apitypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - migrateV2PodNamespace = "kotsadm" - migrateV2PodName = "migrate-v2" -) - -var _migrateV2PodSpec = corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Pod", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: migrateV2PodNamespace, - Name: migrateV2PodName, - Labels: map[string]string{ - "app": "install-v2-manager", - }, - }, - Spec: corev1.PodSpec{ - ServiceAccountName: "kotsadm", - RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{ - { - Name: "install-v2-manager", - Image: "DYNAMIC", - ImagePullPolicy: corev1.PullIfNotPresent, - Command: []string{ - "/manager", "migrate-v2", - "--installation", "/ec/installation/installation", - // "--migrate-v2-secret", "DYNAMIC", - // "--app-slug", "DYNAMIC", - // "--app-version-label", 
"DYNAMIC", - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "installation", // required to set runtime config - MountPath: "/ec/installation", - ReadOnly: true, - }, - }, - }, - }, - Volumes: []corev1.Volume{ - // { - // Name: "installation", - // VolumeSource: corev1.VolumeSource{ - // ConfigMap: &corev1.ConfigMapVolumeSource{ - // LocalObjectReference: corev1.LocalObjectReference{ - // Name: "DYNAMIC", - // }, - // }, - // }, - // }, - }, - }, -} - -// runMigrateV2PodAndWait runs the v2 migration pod and waits for the pod to finish. -func runMigrateV2PodAndWait( - ctx context.Context, logf LogFunc, cli client.Client, - in *ecv1beta1.Installation, - migrationSecret string, appSlug string, appVersionLabel string, -) error { - logf("Ensuring installation config map") - if err := ensureInstallationConfigMap(ctx, cli, in); err != nil { - return fmt.Errorf("ensure installation config map: %w", err) - } - logf("Successfully ensured installation config map") - - logf("Getting operator image name") - operatorImage, err := getOperatorImageName(ctx, cli, in) - if err != nil { - return fmt.Errorf("get operator image name: %w", err) - } - logf("Successfully got operator image name") - - logf("Ensuring v2 migration pod") - _, err = ensureMigrateV2Pod(ctx, cli, in, operatorImage, migrationSecret, appSlug, appVersionLabel) - if err != nil { - return fmt.Errorf("create pod: %w", err) - } - logf("Successfully ensured v2 migration pod") - - logf("Waiting for v2 migration pod to finish") - err = waitForMigrateV2Pod(ctx, cli) - if err != nil { - return fmt.Errorf("wait for pod: %w", err) - } - logf("Successfully waited for v2 migration pod to finish") - - // NOTE: the installation config map cannot be deleted because the service account gets deleted - // during the v2 migration so this pod no longer has permissions. 
- // - // logf("Deleting installation config map") - // err = deleteInstallationConfigMap(ctx, cli, in) - // if err != nil { - // return fmt.Errorf("delete installation config map: %w", err) - // } - // logf("Successfully deleted installation config map") - - return nil -} - -func getOperatorImageName(ctx context.Context, cli client.Client, in *ecv1beta1.Installation) (string, error) { - if in.Spec.AirGap { - err := metadata.CopyVersionMetadataToCluster(ctx, cli, in) - if err != nil { - return "", fmt.Errorf("copy version metadata to cluster: %w", err) - } - } - - meta, err := release.MetadataFor(ctx, in, cli) - if err != nil { - return "", fmt.Errorf("get release metadata: %w", err) - } - - for _, image := range meta.Images { - if strings.Contains(image, "embedded-cluster-operator-image") { - return image, nil - } - } - return "", fmt.Errorf("no embedded-cluster-operator image found in release metadata") -} - -func ensureMigrateV2Pod( - ctx context.Context, cli client.Client, - in *ecv1beta1.Installation, operatorImage string, - migrationSecret string, appSlug string, appVersionLabel string, -) (string, error) { - existing, err := getMigrateV2Pod(ctx, cli) - if err == nil { - if migrateV2PodHasSucceeded(existing) { - return existing.Name, nil - } else if migrateV2PodHasFailed(existing) { - err := cli.Delete(ctx, &existing) - if err != nil { - return "", fmt.Errorf("delete pod %s: %w", existing.Name, err) - } - } else { - // still running - return existing.Name, nil - } - } else if !k8serrors.IsNotFound(err) { - return "", fmt.Errorf("get pod: %w", err) - } - - pod := getMigrateV2PodSpec(in, operatorImage, migrationSecret, appSlug, appVersionLabel) - if err := cli.Create(ctx, pod); err != nil { - return "", err - } - - return pod.Name, nil -} - -// deleteMigrateV2Pod deletes the v2 migration pod. -func deleteMigrateV2Pod(ctx context.Context, logf LogFunc, cli client.Client) error { - logf("Deleting v2 migration pod") - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: migrateV2PodNamespace, Name: migrateV2PodName, - }, - } - err := cli.Delete(ctx, pod, client.PropagationPolicy(metav1.DeletePropagationBackground)) - if err != nil { - return fmt.Errorf("delete pod: %w", err) - } - - logf("Successfully deleted v2 migration pod") - - return nil -} - -func waitForMigrateV2Pod(ctx context.Context, cli client.Client) error { - return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { - pod := corev1.Pod{} - nsn := apitypes.NamespacedName{Namespace: migrateV2PodNamespace, Name: migrateV2PodName} - err := cli.Get(ctx, nsn, &pod) - switch { - // If we get unauthorized, it means the service account has been deleted by the migration - // pod and it is likely almost done. 
- case k8serrors.IsUnauthorized(err): - return true, nil - case err != nil: - return false, fmt.Errorf("get pod: %w", err) - case pod.Status.Phase == corev1.PodSucceeded: - return true, nil - case pod.Status.Phase == corev1.PodFailed: - return true, fmt.Errorf("pod failed: %s", pod.Status.Reason) - default: - return false, nil - } - }) -} - -func getMigrateV2Pod(ctx context.Context, cli client.Client) (corev1.Pod, error) { - var pod corev1.Pod - err := cli.Get(ctx, client.ObjectKey{Namespace: migrateV2PodNamespace, Name: migrateV2PodName}, &pod) - if err != nil { - return pod, fmt.Errorf("get pod: %w", err) - } - return pod, nil -} - -func migrateV2PodHasSucceeded(pod corev1.Pod) bool { - return pod.Status.Phase == corev1.PodSucceeded -} - -func migrateV2PodHasFailed(pod corev1.Pod) bool { - return pod.Status.Phase == corev1.PodFailed -} - -func getMigrateV2PodSpec( - in *ecv1beta1.Installation, operatorImage string, - migrationSecret string, appSlug string, appVersionLabel string, -) *corev1.Pod { - pod := _migrateV2PodSpec.DeepCopy() - - pod.Spec.Containers[0].Image = operatorImage - pod.Spec.Containers[0].Command = append(pod.Spec.Containers[0].Command, - "--migrate-v2-secret", migrationSecret, - "--app-slug", appSlug, - "--app-version-label", appVersionLabel, - ) - - pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ - Name: "installation", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: getInstallationConfigMapName(in), - }, - }, - }, - }) - - return pod -} - -func ensureInstallationConfigMap(ctx context.Context, cli client.Client, in *ecv1beta1.Installation) error { - copy := in.DeepCopy() - err := createInstallationConfigMap(ctx, cli, copy) - if k8serrors.IsAlreadyExists(err) { - err := updateInstallationConfigMap(ctx, cli, copy) - if err != nil { - return fmt.Errorf("update installation config map: %w", err) - } - } else if err != nil { - return fmt.Errorf("create installation config map: %w", err) - } - return nil -} - -func deleteInstallationConfigMap(ctx context.Context, cli client.Client, in *ecv1beta1.Installation) error { - cm := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: migrateV2PodNamespace, - Name: getInstallationConfigMapName(in), - }, - } - err := cli.Delete(ctx, &cm) - if k8serrors.IsNotFound(err) { - return nil - } - return err -} - -func createInstallationConfigMap(ctx context.Context, cli client.Client, in *ecv1beta1.Installation) error { - data, err := json.Marshal(in) - if err != nil { - return fmt.Errorf("marshal installation: %w", err) - } - - cm := &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: migrateV2PodNamespace, - Name: getInstallationConfigMapName(in), - }, - Data: map[string]string{ - "installation": string(data), - }, - } - if err := cli.Create(ctx, cm); err != nil { - return fmt.Errorf("create configmap: %w", err) - } - - return nil -} - -func updateInstallationConfigMap(ctx context.Context, cli client.Client, in *ecv1beta1.Installation) error { - // find configmap with the same name as the installation - nsn := apitypes.NamespacedName{Namespace: migrateV2PodNamespace, Name: getInstallationConfigMapName(in)} - var cm corev1.ConfigMap - if err := cli.Get(ctx, nsn, &cm); err != nil { - return fmt.Errorf("get configmap: %w", err) - } - - // marshal the installation and update the configmap - data, err := json.Marshal(in) - if err != nil { - 
return fmt.Errorf("marshal installation: %w", err) - } - cm.Data["installation"] = string(data) - - if err := cli.Update(ctx, &cm); err != nil { - return fmt.Errorf("update configmap: %w", err) - } - return nil -} - -func getInstallationConfigMapName(in *ecv1beta1.Installation) string { - return fmt.Sprintf("%s-installation", in.Name) -} diff --git a/operator/pkg/cli/migrate_v2_pod_test.go b/operator/pkg/cli/migrate_v2_pod_test.go deleted file mode 100644 index 108662aa1..000000000 --- a/operator/pkg/cli/migrate_v2_pod_test.go +++ /dev/null @@ -1,316 +0,0 @@ -package cli - -import ( - "context" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - "time" - - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - apitypes "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func Test_runMigrateV2PodAndWait(t *testing.T) { - // Start a mock server for the metadata request - metadataServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - metadata := map[string]interface{}{ - "images": []string{ - "embedded-cluster-operator-image:1.0", - }, - } - json.NewEncoder(w).Encode(metadata) - })) - defer metadataServer.Close() - - // Create test installation with metadata override URL - installation := &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-install", - }, - Spec: ecv1beta1.InstallationSpec{ - Config: &ecv1beta1.ConfigSpec{ - Version: "1.0.0", - MetadataOverrideURL: metadataServer.URL, - }, - }, - } - - // Set up the test scheme - scheme := runtime.NewScheme() - require.NoError(t, corev1.AddToScheme(scheme)) - require.NoError(t, batchv1.AddToScheme(scheme)) - require.NoError(t, ecv1beta1.AddToScheme(scheme)) - - // Create fake client with nodes - cli := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(installation). 
- Build() - - // Create a context with timeout - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // Start a goroutine to simulate the pod completing successfully - go func() { - for { - time.Sleep(100 * time.Millisecond) // Give time for pod to be created - - // Get the pod - nsn := apitypes.NamespacedName{Namespace: migrateV2PodNamespace, Name: migrateV2PodName} - var pod corev1.Pod - err := cli.Get(ctx, nsn, &pod) - if k8serrors.IsNotFound(err) { - continue - } - require.NoError(t, err) - - // Update the pod to be successful - // Update pod status - pod.Status.Phase = corev1.PodSucceeded - err = cli.Status().Update(ctx, &pod) - require.NoError(t, err) - } - }() - - // Run the function - logf := func(format string, args ...any) { - // No-op logger for testing - } - - err := runMigrateV2PodAndWait(ctx, logf, cli, installation, "test-secret", "test-app", "v1.0.0") - require.NoError(t, err) - - // Verify the pod was created and completed - nsn := apitypes.NamespacedName{Namespace: migrateV2PodNamespace, Name: migrateV2PodName} - var pod corev1.Pod - err = cli.Get(ctx, nsn, &pod) - require.NoError(t, err) - - // Verify pod succeeded - assert.Equal(t, corev1.PodSucceeded, pod.Status.Phase) - - // Verify pod has correct labels - assert.Equal(t, "install-v2-manager", pod.Labels["app"]) - - // Verify pod spec - assert.Equal(t, "install-v2-manager", pod.Spec.Containers[0].Name) - assert.Equal(t, corev1.RestartPolicyNever, pod.Spec.RestartPolicy) - - // Verify volumes - foundInstallationVolume := false - for _, volume := range pod.Spec.Volumes { - if volume.Name == "installation" { - foundInstallationVolume = true - assert.Equal(t, "test-install-installation", volume.ConfigMap.Name) - } - } - assert.True(t, foundInstallationVolume, "expected installation volume to be mounted") - - // Verify command arguments - container := pod.Spec.Containers[0] - assert.Equal(t, container.Image, "embedded-cluster-operator-image:1.0") - assert.Contains(t, container.Command, "migrate-v2") - assert.Contains(t, container.Command, "--migrate-v2-secret") - assert.Contains(t, container.Command, "test-secret") - assert.Contains(t, container.Command, "--app-slug") - assert.Contains(t, container.Command, "test-app") - assert.Contains(t, container.Command, "--app-version-label") - assert.Contains(t, container.Command, "v1.0.0") -} - -func Test_deleteMigrateV2Pod(t *testing.T) { - // Create existing pod - pods := []corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: migrateV2PodName, - Namespace: migrateV2PodNamespace, - }, - }, - } - - // Set up the test scheme - scheme := runtime.NewScheme() - require.NoError(t, corev1.AddToScheme(scheme)) - require.NoError(t, batchv1.AddToScheme(scheme)) - - // Create fake client with nodes and pods - cli := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects( - podsToRuntimeObjects(pods)..., - ). 
- Build() - - // Run the function - logf := func(format string, args ...any) { - // No-op logger for testing - } - - err := deleteMigrateV2Pod(context.Background(), logf, cli) - require.NoError(t, err) - - // Verify pods were deleted - var remainingPods corev1.PodList - err = cli.List(context.Background(), &remainingPods) - require.NoError(t, err) - assert.Empty(t, remainingPods.Items, "expected all pods to be deleted") -} - -func nodesToRuntimeObjects(nodes []corev1.Node) []client.Object { - objects := make([]client.Object, len(nodes)) - for i := range nodes { - objects[i] = &nodes[i] - } - return objects -} - -func Test_ensureMigrateV2Pod(t *testing.T) { - installation := &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-install", - }, - Spec: ecv1beta1.InstallationSpec{ - Config: &ecv1beta1.ConfigSpec{ - Version: "1.0.0", - }, - }, - } - - tests := []struct { - name string - existingPod *corev1.Pod - expectNewPod bool - validatePod func(*testing.T, *corev1.Pod) - expectError bool - }{ - { - name: "creates new pod when none exists", - existingPod: nil, - validatePod: func(t *testing.T, pod *corev1.Pod) { - assert.Equal(t, migrateV2PodName, pod.Name) - assert.Equal(t, migrateV2PodNamespace, pod.Namespace) - - // Verify volumes - foundInstallationVolume := false - for _, volume := range pod.Spec.Volumes { - if volume.Name == "installation" { - foundInstallationVolume = true - assert.Equal(t, "test-install-installation", volume.ConfigMap.Name) - } - } - assert.True(t, foundInstallationVolume, "expected installation volume to be mounted") - - // Verify container - container := pod.Spec.Containers[0] - assert.Equal(t, container.Image, "embedded-cluster-operator-image:1.0") - assert.Contains(t, container.Command, "migrate-v2") - assert.Contains(t, container.Command, "--migrate-v2-secret") - assert.Contains(t, container.Command, "test-secret") - assert.Contains(t, container.Command, "--app-slug") - assert.Contains(t, container.Command, "test-app") - assert.Contains(t, container.Command, "--app-version-label") - assert.Contains(t, container.Command, "v1.0.0") - }, - expectError: false, - }, - { - name: "reuses existing successful pod", - existingPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: migrateV2PodName, - Namespace: migrateV2PodNamespace, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodSucceeded, - }, - }, - validatePod: func(t *testing.T, pod *corev1.Pod) { - assert.Equal(t, corev1.PodSucceeded, pod.Status.Phase) - }, - expectError: false, - }, - { - name: "replaces failed pod", - existingPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: migrateV2PodName, - Namespace: migrateV2PodNamespace, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodFailed, - }, - }, - validatePod: func(t *testing.T, pod *corev1.Pod) { - assert.Equal(t, corev1.PodPhase(""), pod.Status.Phase) - }, - expectError: false, - }, - } - - // Set up the test scheme - scheme := runtime.NewScheme() - require.NoError(t, corev1.AddToScheme(scheme)) - require.NoError(t, batchv1.AddToScheme(scheme)) - require.NoError(t, ecv1beta1.AddToScheme(scheme)) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - // Create fake client - builder := fake.NewClientBuilder().WithScheme(scheme) - if tt.existingPod != nil { - builder = builder.WithObjects(tt.existingPod) - } - cli := builder.Build() - - // Run the function - podName, err := ensureMigrateV2Pod( - context.Background(), - cli, - installation, - "embedded-cluster-operator-image:1.0", - "test-secret", - "test-app", - 
"v1.0.0", - ) - - if tt.expectError { - require.Error(t, err) - return - } - require.NoError(t, err) - - // Get the pod - var pod corev1.Pod - err = cli.Get(context.Background(), client.ObjectKey{ - Namespace: migrateV2PodNamespace, - Name: podName, - }, &pod) - require.NoError(t, err) - - // Run validation - tt.validatePod(t, &pod) - }) - } -} - -func podsToRuntimeObjects(pods []corev1.Pod) []client.Object { - objects := make([]client.Object, len(pods)) - for i := range pods { - objects[i] = &pods[i] - } - return objects -} diff --git a/operator/pkg/cli/migratev2/adminconsole.go b/operator/pkg/cli/migratev2/adminconsole.go deleted file mode 100644 index eaee0e175..000000000 --- a/operator/pkg/cli/migratev2/adminconsole.go +++ /dev/null @@ -1,146 +0,0 @@ -package migratev2 - -import ( - "context" - "fmt" - "time" - - k0sv1beta1 "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "gopkg.in/yaml.v3" - corev1 "k8s.io/api/core/v1" - apitypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// enableV2AdminConsole enables Embedded Cluster V2 in kotsadm by setting the IS_EC2_INSTALL -// environment variable to true in the admin-console chart and waits for the chart to be updated. -func enableV2AdminConsole(ctx context.Context, logf LogFunc, cli client.Client, in *ecv1beta1.Installation) error { - logf("Updating admin-console chart values") - err := updateAdminConsoleClusterConfig(ctx, cli) - if err != nil { - return fmt.Errorf("update cluster config: %w", err) - } - logf("Successfully updated admin-console chart values") - - logf("Waiting for admin-console deployment to be updated") - err = waitForAdminConsoleDeployment(ctx, cli) - if err != nil { - return fmt.Errorf("wait for admin console deployment to be updated: %w", err) - } - logf("Successfully waited for admin-console deployment to be updated") - - return nil -} - -func updateAdminConsoleClusterConfig(ctx context.Context, cli client.Client) error { - var clusterConfig k0sv1beta1.ClusterConfig - nsn := apitypes.NamespacedName{Name: "k0s", Namespace: "kube-system"} - err := cli.Get(ctx, nsn, &clusterConfig) - if err != nil { - return fmt.Errorf("get k0s cluster config: %w", err) - } - - for ix, ext := range clusterConfig.Spec.Extensions.Helm.Charts { - if ext.Name == "admin-console" { - values, err := updateAdminConsoleChartValues([]byte(ext.Values)) - if err != nil { - return fmt.Errorf("update admin-console chart values: %w", err) - } - ext.Values = string(values) - - clusterConfig.Spec.Extensions.Helm.Charts[ix] = ext - } - } - - err = cli.Update(ctx, &clusterConfig) - if err != nil { - return fmt.Errorf("update k0s cluster config: %w", err) - } - - return nil -} - -func updateAdminConsoleChartValues(values []byte) ([]byte, error) { - var m map[string]interface{} - err := yaml.Unmarshal(values, &m) - if err != nil { - return nil, fmt.Errorf("unmarshal values: %w", err) - } - - m["isEC2Install"] = "true" - - b, err := yaml.Marshal(m) - if err != nil { - return nil, fmt.Errorf("marshal values: %w", err) - } - - return b, nil -} - -// waitForAdminConsoleDeployment waits for the kotsadm pod to be updated as the service account -// does not have permissions to get deployments. 
-func waitForAdminConsoleDeployment(ctx context.Context, cli client.Client) error { - backoff := wait.Backoff{Steps: 60, Duration: 5 * time.Second, Factor: 1.0, Jitter: 0.1} - var lasterr error - if err := wait.ExponentialBackoffWithContext( - ctx, backoff, func(ctx context.Context) (bool, error) { - ready, err := isAdminConsoleDeploymentUpdated(ctx, cli) - if err != nil { - lasterr = fmt.Errorf("check deployment: %w", err) - return false, nil - } - return ready, nil - }, - ); err != nil { - if lasterr != nil { - return lasterr - } - return err - } - return nil -} - -// isAdminConsoleDeploymentUpdated checks that the kotsadm pod has the desired environment variable -// and is ready. This is necessary as the service account does not have permissions to get -// deployments. -func isAdminConsoleDeploymentUpdated(ctx context.Context, cli client.Client) (bool, error) { - var podList corev1.PodList - err := cli.List(ctx, &podList, client.InNamespace("kotsadm"), client.MatchingLabels{"app": "kotsadm"}) - if err != nil { - return false, fmt.Errorf("list kotsadm pods: %w", err) - } - // could be a rolling update - if len(podList.Items) != 1 { - return false, nil - } - pod := podList.Items[0] - if adminConsolePodHasEnvVar(pod) && adminConsolePodIsReady(pod) { - return true, nil - } - return false, nil -} - -func adminConsolePodHasEnvVar(pod corev1.Pod) bool { - for _, container := range pod.Spec.Containers { - if container.Name == "kotsadm" { - for _, env := range container.Env { - if env.Name == "IS_EC2_INSTALL" && env.Value == "true" { - return true - } - } - break - } - } - return false -} - -func adminConsolePodIsReady(pod corev1.Pod) bool { - for _, condition := range pod.Status.Conditions { - if condition.Type == corev1.PodReady { - return condition.Status == corev1.ConditionTrue - } - } - return false -} diff --git a/operator/pkg/cli/migratev2/adminconsole_test.go b/operator/pkg/cli/migratev2/adminconsole_test.go deleted file mode 100644 index 81e29e92e..000000000 --- a/operator/pkg/cli/migratev2/adminconsole_test.go +++ /dev/null @@ -1,370 +0,0 @@ -package migratev2 - -import ( - "context" - "testing" - - k0sv1beta1 "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - apitypes "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func Test_updateAdminConsoleClusterConfig(t *testing.T) { - tests := []struct { - name string - initialCharts k0sv1beta1.ChartsSettings - expectedCharts k0sv1beta1.ChartsSettings - expectError bool - }{ - { - name: "updates admin-console chart values", - initialCharts: k0sv1beta1.ChartsSettings{ - { - Name: "admin-console", - Values: "foo: bar", - }, - { - Name: "other-chart", - Values: "unchanged: true", - }, - }, - expectedCharts: k0sv1beta1.ChartsSettings{ - { - Name: "admin-console", - Values: "foo: bar\nisEC2Install: \"true\"\n", - }, - { - Name: "other-chart", - Values: "unchanged: true", - }, - }, - expectError: false, - }, - { - name: "does not update admin-console chart values if already set", - initialCharts: k0sv1beta1.ChartsSettings{ - { - Name: "admin-console", - Values: "foo: bar\nisEC2Install: \"true\"\n", - }, - { - Name: "other-chart", - Values: "unchanged: true", - }, - }, - expectedCharts: k0sv1beta1.ChartsSettings{ - { - Name: "admin-console", - Values: "foo: bar\nisEC2Install: 
\"true\"\n", - }, - { - Name: "other-chart", - Values: "unchanged: true", - }, - }, - expectError: false, - }, - { - name: "handles missing admin-console chart", - initialCharts: k0sv1beta1.ChartsSettings{ - { - Name: "other-chart", - Values: "unchanged: true", - }, - }, - expectedCharts: k0sv1beta1.ChartsSettings{ - { - Name: "other-chart", - Values: "unchanged: true", - }, - }, - expectError: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scheme := runtime.NewScheme() - require.NoError(t, k0sv1beta1.AddToScheme(scheme)) - - initialConfig := &k0sv1beta1.ClusterConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "k0s", - Namespace: "kube-system", - }, - Spec: &k0sv1beta1.ClusterSpec{ - Extensions: &k0sv1beta1.ClusterExtensions{ - Helm: &k0sv1beta1.HelmExtensions{ - Charts: tt.initialCharts, - }, - }, - }, - } - - cli := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(initialConfig). - Build() - - err := updateAdminConsoleClusterConfig(context.Background(), cli) - - if tt.expectError { - require.Error(t, err) - return - } - require.NoError(t, err) - - var updatedConfig k0sv1beta1.ClusterConfig - err = cli.Get(context.Background(), apitypes.NamespacedName{ - Namespace: "kube-system", - Name: "k0s", - }, &updatedConfig) - require.NoError(t, err) - - assert.Equal(t, tt.expectedCharts, updatedConfig.Spec.Extensions.Helm.Charts) - }) - } -} - -func Test_isAdminConsoleDeploymentUpdated(t *testing.T) { - tests := []struct { - name string - pods []corev1.Pod - want bool - wantErr bool - }{ - { - name: "pod is ready and has correct env var", - pods: []corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "kotsadm-123", - Namespace: "kotsadm", - Labels: map[string]string{ - "app": "kotsadm", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kotsadm", - Env: []corev1.EnvVar{ - { - Name: "IS_EC2_INSTALL", - Value: "true", - }, - }, - }, - }, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, - }, - }, - want: true, - wantErr: false, - }, - { - name: "pod is ready but missing env var", - pods: []corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "kotsadm-123", - Namespace: "kotsadm", - Labels: map[string]string{ - "app": "kotsadm", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kotsadm", - Env: []corev1.EnvVar{}, - }, - }, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, - }, - }, - want: false, - wantErr: false, - }, - { - name: "pod has env var but not ready", - pods: []corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "kotsadm-123", - Namespace: "kotsadm", - Labels: map[string]string{ - "app": "kotsadm", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kotsadm", - Env: []corev1.EnvVar{ - { - Name: "IS_EC2_INSTALL", - Value: "true", - }, - }, - }, - }, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionFalse, - }, - }, - }, - }, - }, - want: false, - wantErr: false, - }, - { - name: "multiple pods during rolling update", - pods: []corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "kotsadm-123", - Namespace: "kotsadm", - Labels: map[string]string{ - "app": "kotsadm", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kotsadm", - Env: 
[]corev1.EnvVar{ - { - Name: "IS_EC2_INSTALL", - Value: "true", - }, - }, - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "kotsadm-124", - Namespace: "kotsadm", - Labels: map[string]string{ - "app": "kotsadm", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "kotsadm", - Env: []corev1.EnvVar{ - { - Name: "IS_EC2_INSTALL", - Value: "true", - }, - }, - }, - }, - }, - }, - }, - want: false, - wantErr: false, - }, - { - name: "no pods found", - pods: []corev1.Pod{}, - want: false, - wantErr: false, - }, - } - - scheme := runtime.NewScheme() - require.NoError(t, corev1.AddToScheme(scheme)) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cli := fake.NewClientBuilder(). - WithObjects(podsToRuntimeObjects(tt.pods)...). - Build() - - got, err := isAdminConsoleDeploymentUpdated(context.Background(), cli) - if tt.wantErr { - require.Error(t, err) - return - } - require.NoError(t, err) - assert.Equal(t, tt.want, got) - }) - } -} - -func podsToRuntimeObjects(pods []corev1.Pod) []client.Object { - objects := make([]client.Object, len(pods)) - for i := range pods { - objects[i] = &pods[i] - } - return objects -} - -func Test_updateAdminConsoleChartValues(t *testing.T) { - type args struct { - values []byte - } - tests := []struct { - name string - args args - want []byte - wantErr bool - }{ - { - name: "basic", - args: args{ - values: []byte(`isAirgap: "true" -embeddedClusterID: e79f0701-67f3-4abf-a672-42a1f830064e -`), - }, - want: []byte(`embeddedClusterID: e79f0701-67f3-4abf-a672-42a1f830064e -isAirgap: "true" -isEC2Install: "true" -`), - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := updateAdminConsoleChartValues(tt.args.values) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - assert.Equal(t, string(tt.want), string(got)) - }) - } -} diff --git a/operator/pkg/cli/migratev2/installation.go b/operator/pkg/cli/migratev2/installation.go index 599e7fd87..09517be88 100644 --- a/operator/pkg/cli/migratev2/installation.go +++ b/operator/pkg/cli/migratev2/installation.go @@ -3,15 +3,9 @@ package migratev2 import ( "context" "fmt" - "time" - "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/replicatedhq/embedded-cluster/pkg/kubeutils" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -21,11 +15,7 @@ import ( func setV2MigrationInProgress(ctx context.Context, logf LogFunc, cli client.Client, in *ecv1beta1.Installation) error { logf("Setting v2 migration in progress") - err := setInstallationCondition(ctx, cli, in, metav1.Condition{ - Type: ecv1beta1.ConditionTypeV2MigrationInProgress, - Status: metav1.ConditionTrue, - Reason: "V2MigrationInProgress", - }) + err := setV2MigrationInProgressCondition(ctx, cli, in, metav1.ConditionTrue, "MigrationInProgress", "") if err != nil { return fmt.Errorf("set v2 migration in progress condition: %w", err) } @@ -34,101 +24,44 @@ func setV2MigrationInProgress(ctx context.Context, logf LogFunc, cli client.Clie return nil } -// waitForInstallationStateInstalled waits for the installation to be in a successful state and -// ready for the migration. 
-func waitForInstallationStateInstalled(ctx context.Context, logf LogFunc, cli client.Client, installation *ecv1beta1.Installation) error { - logf("Waiting for installation to reconcile") - - err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { - in, err := kubeutils.GetCRDInstallation(ctx, cli, installation.Name) - if err != nil { - return false, fmt.Errorf("get installation: %w", err) - } - - switch in.Status.State { - // Success states - case ecv1beta1.InstallationStateInstalled, ecv1beta1.InstallationStateAddonsInstalled: - return true, nil - - // Failure states - case ecv1beta1.InstallationStateFailed, ecv1beta1.InstallationStateHelmChartUpdateFailure: - return false, fmt.Errorf("installation failed: %s", in.Status.Reason) - case ecv1beta1.InstallationStateObsolete: - return false, fmt.Errorf("installation is obsolete") +// setV2MigrationComplete sets the Installation condition to indicate that the v2 migration is +// complete. +func setV2MigrationComplete(ctx context.Context, logf LogFunc, cli client.Client, in *ecv1beta1.Installation) error { + logf("Setting v2 migration complete") - // In progress states - default: - return false, nil - } - }) + err := setV2MigrationInProgressCondition(ctx, cli, in, metav1.ConditionFalse, "MigrationComplete", "") if err != nil { - return err + return fmt.Errorf("set v2 migration in progress condition: %w", err) } - logf("Installation reconciled") + logf("Successfully set v2 migration complete") return nil } -// copyInstallationsToConfigMaps copies the Installation CRs to ConfigMaps. -func copyInstallationsToConfigMaps(ctx context.Context, logf LogFunc, cli client.Client) error { - var installationList ecv1beta1.InstallationList - err := cli.List(ctx, &installationList) - if err != nil { - // handle the case where the CRD has already been uninstalled - if meta.IsNoMatchError(err) { - return nil - } - return fmt.Errorf("list installations: %w", err) - } +// setV2MigrationFailed sets the Installation condition to indicate that the v2 migration has +// failed. +func setV2MigrationFailed(ctx context.Context, logf LogFunc, cli client.Client, in *ecv1beta1.Installation, failure error) error { + logf("Setting v2 migration failed") - for _, installation := range installationList.Items { - logf("Copying installation %s to config map", installation.Name) - err := ensureInstallationConfigMap(ctx, cli, &installation) - if err != nil { - return fmt.Errorf("ensure config map for installation %s: %w", installation.Name, err) - } - logf("Successfully copied installation %s to config map", installation.Name) + err := setV2MigrationInProgressCondition(ctx, cli, in, metav1.ConditionFalse, "MigrationFailed", failure.Error()) + if err != nil { + return fmt.Errorf("set v2 migration in progress condition: %w", err) } + logf("Successfully set v2 migration failed") return nil } -func ensureInstallationConfigMap(ctx context.Context, cli client.Client, in *ecv1beta1.Installation) error { - copy := in.DeepCopy() - err := kubeutils.CreateInstallation(ctx, cli, copy) - if k8serrors.IsAlreadyExists(err) { - err := kubeutils.UpdateInstallation(ctx, cli, copy) - if err != nil { - return fmt.Errorf("update installation: %w", err) - } - } else if err != nil { - return fmt.Errorf("create installation: %w", err) - } - return nil -} - -// ensureInstallationStateInstalled sets the ConfigMap installation state to installed and updates -// the status to mark the upgrade as complete. 
-func ensureInstallationStateInstalled(ctx context.Context, logf LogFunc, cli client.Client, in *ecv1beta1.Installation) error { - logf("Setting installation state to installed") - - // the installation will be in a ConfigMap at this point - copy, err := kubeutils.GetInstallation(ctx, cli, in.Name) - if err != nil { - return fmt.Errorf("get installation: %w", err) - } - - copy.Status.SetState(v1beta1.InstallationStateInstalled, "V2MigrationComplete", nil) - meta.RemoveStatusCondition(©.Status.Conditions, ecv1beta1.ConditionTypeV2MigrationInProgress) - meta.RemoveStatusCondition(©.Status.Conditions, ecv1beta1.ConditionTypeDisableReconcile) - - err = kubeutils.UpdateInstallationStatus(ctx, cli, copy) - if err != nil { - return fmt.Errorf("update installation status: %w", err) - } - - logf("Successfully set installation state to installed") - return nil +func setV2MigrationInProgressCondition( + ctx context.Context, cli client.Client, in *ecv1beta1.Installation, + status metav1.ConditionStatus, reason string, message string, +) error { + return setInstallationCondition(ctx, cli, in, metav1.Condition{ + Type: ecv1beta1.ConditionTypeV2MigrationInProgress, + Status: status, + Reason: reason, + Message: message, + }) } func setInstallationCondition(ctx context.Context, cli client.Client, in *ecv1beta1.Installation, condition metav1.Condition) error { diff --git a/operator/pkg/cli/migratev2/installation_test.go b/operator/pkg/cli/migratev2/installation_test.go index d67c27622..6c53a98c2 100644 --- a/operator/pkg/cli/migratev2/installation_test.go +++ b/operator/pkg/cli/migratev2/installation_test.go @@ -2,13 +2,11 @@ package migratev2 import ( "context" + "fmt" "testing" - "time" ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -16,166 +14,220 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func Test_waitForInstallationStateInstalled(t *testing.T) { +func Test_setV2MigrationInProgress(t *testing.T) { scheme := runtime.NewScheme() require.NoError(t, ecv1beta1.AddToScheme(scheme)) - tests := []struct { - name string + type args struct { installation *ecv1beta1.Installation - updateFunc func(*ecv1beta1.Installation) // Function to update installation state during test - expectError bool - errorString string + } + tests := []struct { + name string + args args + expectError bool }{ { - name: "installation already in installed state", - installation: &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-installation", - }, - Status: ecv1beta1.InstallationStatus{ - State: ecv1beta1.InstallationStateInstalled, + name: "set v2 migration in progress", + args: args{ + installation: &ecv1beta1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-installation", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "embeddedcluster.replicated.com/v1beta1", + Kind: "Installation", + }, }, }, expectError: false, }, { - name: "installation already in failed state", - installation: &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-installation", - }, - Status: ecv1beta1.InstallationStatus{ - State: ecv1beta1.InstallationStateFailed, - Reason: "something went wrong", - }, - }, - expectError: true, - errorString: "installation failed: something went wrong", - }, - { - name: "installation transitions to installed 
state", - installation: &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-installation", - }, - Status: ecv1beta1.InstallationStatus{ - State: ecv1beta1.InstallationStateAddonsInstalling, + name: "updates the condition", + args: args{ + installation: &ecv1beta1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-installation", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "embeddedcluster.replicated.com/v1beta1", + Kind: "Installation", + }, + Status: ecv1beta1.InstallationStatus{ + Conditions: []metav1.Condition{ + { + Type: ecv1beta1.ConditionTypeV2MigrationInProgress, + Status: metav1.ConditionFalse, + Reason: "MigrationFailed", + Message: "Migration failed", + }, + }, + }, }, }, - updateFunc: func(in *ecv1beta1.Installation) { - in.Status.State = ecv1beta1.InstallationStateInstalled - }, expectError: false, }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cli := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tt.args.installation). + WithStatusSubresource(&ecv1beta1.Installation{}). + Build() + + // Discard log messages + logf := func(format string, args ...any) {} + + err := setV2MigrationInProgress(context.Background(), logf, cli, tt.args.installation) + require.NoError(t, err) + + // Verify that the condition was set correctly + var updatedInstallation ecv1beta1.Installation + err = cli.Get(context.Background(), client.ObjectKey{Name: "test-installation"}, &updatedInstallation) + require.NoError(t, err) + + condition := meta.FindStatusCondition(updatedInstallation.Status.Conditions, ecv1beta1.ConditionTypeV2MigrationInProgress) + require.NotNil(t, condition, "Expected V2MigrationInProgress condition to be set") + require.Equal(t, metav1.ConditionTrue, condition.Status) + require.Equal(t, "MigrationInProgress", condition.Reason) + require.Empty(t, condition.Message) + }) + } +} + +func Test_setV2MigrationComplete(t *testing.T) { + scheme := runtime.NewScheme() + require.NoError(t, ecv1beta1.AddToScheme(scheme)) + + type args struct { + installation *ecv1beta1.Installation + } + tests := []struct { + name string + args args + expectError bool + }{ { - name: "installation transitions to failed state", - installation: &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-installation", - }, - Status: ecv1beta1.InstallationStatus{ - State: ecv1beta1.InstallationStateAddonsInstalling, + name: "set v2 migration in progress", + args: args{ + installation: &ecv1beta1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-installation", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "embeddedcluster.replicated.com/v1beta1", + Kind: "Installation", + }, }, }, - updateFunc: func(in *ecv1beta1.Installation) { - in.Status.State = ecv1beta1.InstallationStateHelmChartUpdateFailure - in.Status.Reason = "helm chart update failed" - }, - expectError: true, - errorString: "installation failed: helm chart update failed", + expectError: false, }, { - name: "installation becomes obsolete", - installation: &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-installation", - }, - Status: ecv1beta1.InstallationStatus{ - State: ecv1beta1.InstallationStateAddonsInstalling, + name: "updates the condition", + args: args{ + installation: &ecv1beta1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-installation", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "embeddedcluster.replicated.com/v1beta1", + Kind: "Installation", + }, + Status: ecv1beta1.InstallationStatus{ + Conditions: 
[]metav1.Condition{ + { + Type: ecv1beta1.ConditionTypeV2MigrationInProgress, + Status: metav1.ConditionTrue, + Reason: "MigrationInProgress", + }, + }, + }, }, }, - updateFunc: func(in *ecv1beta1.Installation) { - in.Status.State = ecv1beta1.InstallationStateObsolete - in.Status.Reason = "This is not the most recent installation object" - }, - expectError: true, - errorString: "installation is obsolete", + expectError: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Create a fake client with the test installation cli := fake.NewClientBuilder(). WithScheme(scheme). - WithObjects(tt.installation). - WithStatusSubresource(tt.installation). + WithObjects(tt.args.installation). + WithStatusSubresource(&ecv1beta1.Installation{}). Build() - // If there's an update function, run it in a goroutine after a short delay - if tt.updateFunc != nil { - go func() { - time.Sleep(100 * time.Millisecond) - var installation ecv1beta1.Installation - err := cli.Get(context.Background(), client.ObjectKey{Name: tt.installation.Name}, &installation) - require.NoError(t, err) - tt.updateFunc(&installation) - err = cli.Status().Update(context.Background(), &installation) - require.NoError(t, err) - }() - } - - // Call waitForInstallationStateInstalled - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - err := waitForInstallationStateInstalled(ctx, t.Logf, cli, tt.installation) - - if tt.expectError { - require.Error(t, err) - if tt.errorString != "" { - assert.Contains(t, err.Error(), tt.errorString) - } - } else { - require.NoError(t, err) - } + // Discard log messages + logf := func(format string, args ...any) {} + + err := setV2MigrationComplete(context.Background(), logf, cli, tt.args.installation) + require.NoError(t, err) + + // Verify that the condition was set correctly + var updatedInstallation ecv1beta1.Installation + err = cli.Get(context.Background(), client.ObjectKey{Name: "test-installation"}, &updatedInstallation) + require.NoError(t, err) + + condition := meta.FindStatusCondition(updatedInstallation.Status.Conditions, ecv1beta1.ConditionTypeV2MigrationInProgress) + require.NotNil(t, condition, "Expected V2MigrationInProgress condition to be set") + require.Equal(t, metav1.ConditionFalse, condition.Status) + require.Equal(t, "MigrationComplete", condition.Reason) + require.Empty(t, condition.Message) }) } } -func Test_ensureInstallationStateInstalled(t *testing.T) { +func Test_setV2MigrationFailed(t *testing.T) { scheme := runtime.NewScheme() require.NoError(t, ecv1beta1.AddToScheme(scheme)) - require.NoError(t, corev1.AddToScheme(scheme)) - tests := []struct { - name string + type args struct { installation *ecv1beta1.Installation - expectError bool + failure error + } + tests := []struct { + name string + args args + expectError bool }{ { - name: "updates installation state and removes conditions", - installation: &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-installation", + name: "set v2 migration in progress", + args: args{ + installation: &ecv1beta1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-installation", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "embeddedcluster.replicated.com/v1beta1", + Kind: "Installation", + }, }, - Status: ecv1beta1.InstallationStatus{ - State: ecv1beta1.InstallationStateAddonsInstalled, - Conditions: []metav1.Condition{ - { - Type: ecv1beta1.ConditionTypeV2MigrationInProgress, - Status: metav1.ConditionTrue, - Reason: "V2MigrationInProgress", - 
ObservedGeneration: 1, - }, - { - Type: ecv1beta1.ConditionTypeDisableReconcile, - Status: metav1.ConditionTrue, - Reason: "V2MigrationInProgress", - ObservedGeneration: 1, + failure: fmt.Errorf("failed migration"), + }, + expectError: false, + }, + { + name: "updates the condition", + args: args{ + installation: &ecv1beta1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-installation", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "embeddedcluster.replicated.com/v1beta1", + Kind: "Installation", + }, + Status: ecv1beta1.InstallationStatus{ + Conditions: []metav1.Condition{ + { + Type: ecv1beta1.ConditionTypeV2MigrationInProgress, + Status: metav1.ConditionTrue, + Reason: "MigrationInProgress", + }, }, }, }, + failure: fmt.Errorf("failed migration"), }, expectError: false, }, @@ -183,37 +235,28 @@ func Test_ensureInstallationStateInstalled(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Create a fake client with the test installation cli := fake.NewClientBuilder(). WithScheme(scheme). - WithObjects(tt.installation). - WithStatusSubresource(tt.installation). + WithObjects(tt.args.installation). + WithStatusSubresource(&ecv1beta1.Installation{}). Build() - // Call ensureInstallationStateInstalled - err := ensureInstallationStateInstalled(context.Background(), t.Logf, cli, tt.installation) + // Discard log messages + logf := func(format string, args ...any) {} - if tt.expectError { - require.Error(t, err) - return - } + err := setV2MigrationFailed(context.Background(), logf, cli, tt.args.installation, tt.args.failure) require.NoError(t, err) - // Verify the installation was updated correctly + // Verify that the condition was set correctly var updatedInstallation ecv1beta1.Installation - err = cli.Get(context.Background(), client.ObjectKey{Name: tt.installation.Name}, &updatedInstallation) + err = cli.Get(context.Background(), client.ObjectKey{Name: "test-installation"}, &updatedInstallation) require.NoError(t, err) - // Check state is set to Installed - assert.Equal(t, ecv1beta1.InstallationStateInstalled, updatedInstallation.Status.State) - assert.Equal(t, "V2MigrationComplete", updatedInstallation.Status.Reason) - - // Check conditions are removed condition := meta.FindStatusCondition(updatedInstallation.Status.Conditions, ecv1beta1.ConditionTypeV2MigrationInProgress) - assert.Nil(t, condition, "V2MigrationInProgress condition should be removed") - - condition = meta.FindStatusCondition(updatedInstallation.Status.Conditions, ecv1beta1.ConditionTypeDisableReconcile) - assert.Nil(t, condition, "DisableReconcile condition should be removed") + require.NotNil(t, condition, "Expected V2MigrationInProgress condition to be set") + require.Equal(t, metav1.ConditionFalse, condition.Status) + require.Equal(t, "MigrationFailed", condition.Reason) + require.Equal(t, condition.Message, tt.args.failure.Error()) }) } } diff --git a/operator/pkg/cli/migratev2/k0s.go b/operator/pkg/cli/migratev2/k0s.go new file mode 100644 index 000000000..cdbaebc4b --- /dev/null +++ b/operator/pkg/cli/migratev2/k0s.go @@ -0,0 +1,122 @@ +package migratev2 + +import ( + "context" + "fmt" + "time" + + k0shelmv1beta1 "github.com/k0sproject/k0s/pkg/apis/helm/v1beta1" + k0sv1beta1 "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" + "github.com/replicatedhq/embedded-cluster/pkg/helpers" + apitypes "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// needsK0sChartCleanup checks if the k0s controller is managing any Helm Charts. 
+func needsK0sChartCleanup(ctx context.Context, cli client.Client) (bool, error) { + var helmCharts k0shelmv1beta1.ChartList + if err := cli.List(ctx, &helmCharts); err != nil { + return false, fmt.Errorf("list k0s charts: %w", err) + } + if len(helmCharts.Items) > 0 { + return true, nil + } + + var clusterConfig k0sv1beta1.ClusterConfig + err := cli.Get(ctx, apitypes.NamespacedName{Namespace: "kube-system", Name: "k0s"}, &clusterConfig) + if err != nil { + return false, fmt.Errorf("get cluster config: %w", err) + } + + if clusterConfig.Spec.Extensions.Helm != nil && len(clusterConfig.Spec.Extensions.Helm.Charts) > 0 { + return true, nil + } + + return false, nil +} + +// cleanupK0sCharts removes control of the Helm Charts from the k0s controller. +func cleanupK0sCharts(ctx context.Context, logf LogFunc, cli client.Client) error { + logf("Force deleting Chart custom resources") + // forceDeleteChartCRs is necessary because the k0s controller will otherwise uninstall the + // Helm releases and we don't want that. + err := forceDeleteChartCRs(ctx, cli) + if err != nil { + return fmt.Errorf("delete chart custom resources: %w", err) + } + logf("Successfully force deleted Chart custom resources") + + logf("Removing Helm Charts from ClusterConfig") + err = removeClusterConfigHelmExtensions(ctx, cli) + if err != nil { + return fmt.Errorf("cleanup cluster config: %w", err) + } + logf("Successfully removed Helm Charts from ClusterConfig") + + return nil +} + +func forceDeleteChartCRs(ctx context.Context, cli client.Client) error { + var chartList k0shelmv1beta1.ChartList + err := cli.List(ctx, &chartList) + if err != nil { + return fmt.Errorf("list charts: %w", err) + } + + for _, chart := range chartList.Items { + chart.ObjectMeta.Finalizers = []string{} + err := cli.Update(ctx, &chart) + if err != nil { + return fmt.Errorf("update chart: %w", err) + } + } + + // wait for all finalizers to be removed before deleting the charts + for hasFinalizers := true; hasFinalizers; { + err = cli.List(ctx, &chartList) + if err != nil { + return fmt.Errorf("list charts: %w", err) + } + + hasFinalizers = false + for _, chart := range chartList.Items { + if len(chart.GetFinalizers()) > 0 { + hasFinalizers = true + break + } + } + + time.Sleep(100 * time.Millisecond) + } + + for _, chart := range chartList.Items { + err := cli.Delete(ctx, &chart, client.GracePeriodSeconds(0)) + if err != nil { + return fmt.Errorf("delete chart: %w", err) + } + } + + return nil +} + +func removeClusterConfigHelmExtensions(ctx context.Context, cli client.Client) error { + var clusterConfig k0sv1beta1.ClusterConfig + err := cli.Get(ctx, apitypes.NamespacedName{Namespace: "kube-system", Name: "k0s"}, &clusterConfig) + if err != nil { + return fmt.Errorf("get cluster config: %w", err) + } + + clusterConfig.Spec.Extensions.Helm = &k0sv1beta1.HelmExtensions{} + + unstructured, err := helpers.K0sClusterConfigTo129Compat(&clusterConfig) + if err != nil { + return fmt.Errorf("convert cluster config to 1.29 compat: %w", err) + } + + err = cli.Update(ctx, unstructured) + if err != nil { + return fmt.Errorf("update cluster config: %w", err) + } + + return nil +} diff --git a/operator/pkg/cli/migratev2/k0s_test.go b/operator/pkg/cli/migratev2/k0s_test.go new file mode 100644 index 000000000..f63fb7c0e --- /dev/null +++ b/operator/pkg/cli/migratev2/k0s_test.go @@ -0,0 +1,111 @@ +package migratev2 + +import ( + "context" + "testing" + + k0shelmv1beta1 "github.com/k0sproject/k0s/pkg/apis/helm/v1beta1" + k0sv1beta1 
"github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestNeedsK0sChartCleanup(t *testing.T) { + tests := []struct { + name string + objects []runtime.Object + wantCleanup bool + wantErr bool + }{ + { + name: "no charts or config", + objects: []runtime.Object{}, + wantCleanup: false, + wantErr: true, // should error because cluster config is missing + }, + { + name: "empty cluster config", + objects: []runtime.Object{ + &k0sv1beta1.ClusterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "k0s", + Namespace: "kube-system", + }, + Spec: &k0sv1beta1.ClusterSpec{}, + }, + }, + wantCleanup: false, + wantErr: false, + }, + { + name: "has helm charts", + objects: []runtime.Object{ + &k0sv1beta1.ClusterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "k0s", + Namespace: "kube-system", + }, + Spec: &k0sv1beta1.ClusterSpec{}, + }, + &k0shelmv1beta1.Chart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-chart", + }, + }, + }, + wantCleanup: true, + wantErr: false, + }, + { + name: "has helm extensions in config", + objects: []runtime.Object{ + &k0sv1beta1.ClusterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "k0s", + Namespace: "kube-system", + }, + Spec: &k0sv1beta1.ClusterSpec{ + Extensions: &k0sv1beta1.ClusterExtensions{ + Helm: &k0sv1beta1.HelmExtensions{ + Charts: []k0sv1beta1.Chart{ + { + Name: "test-chart", + Version: "1.0.0", + }, + }, + }, + }, + }, + }, + }, + wantCleanup: true, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := runtime.NewScheme() + err := k0sv1beta1.AddToScheme(scheme) + require.NoError(t, err) + err = k0shelmv1beta1.AddToScheme(scheme) + require.NoError(t, err) + + cli := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(tt.objects...). + Build() + + needsCleanup, err := needsK0sChartCleanup(context.Background(), cli) + if tt.wantErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + require.Equal(t, tt.wantCleanup, needsCleanup) + }) + } +} diff --git a/operator/pkg/cli/migratev2/manager.go b/operator/pkg/cli/migratev2/manager.go deleted file mode 100644 index 3d6ed0daf..000000000 --- a/operator/pkg/cli/migratev2/manager.go +++ /dev/null @@ -1,194 +0,0 @@ -package migratev2 - -import ( - "context" - "fmt" - "strings" - "time" - - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/replicatedhq/embedded-cluster/operator/pkg/metadata" - "github.com/replicatedhq/embedded-cluster/operator/pkg/release" - "github.com/replicatedhq/embedded-cluster/pkg/kubeutils" - "golang.org/x/sync/errgroup" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - podNamespace = "embedded-cluster" - podNamePrefix = "install-v2-manager-" -) - -// runManagerInstallPodsAndWait runs the v2 manager install pod on all nodes and waits for the pods -// to finish. 
-func runManagerInstallPodsAndWait( - ctx context.Context, logf LogFunc, cli client.Client, - in *ecv1beta1.Installation, - migrationSecret string, appSlug string, appVersionLabel string, -) error { - logf("Ensuring installation config map") - if err := ensureInstallationConfigMap(ctx, cli, in); err != nil { - return fmt.Errorf("ensure installation config map: %w", err) - } - logf("Successfully ensured installation config map") - - logf("Getting operator image name") - operatorImage, err := getOperatorImageName(ctx, cli, in) - if err != nil { - return fmt.Errorf("get operator image name: %w", err) - } - logf("Successfully got operator image name") - - var nodeList corev1.NodeList - if err := cli.List(ctx, &nodeList); err != nil { - return fmt.Errorf("list nodes: %w", err) - } - - for _, node := range nodeList.Items { - logf("Ensuring manager install pod for node %s", node.Name) - _, err := ensureManagerInstallPodForNode(ctx, cli, node, in, operatorImage, migrationSecret, appSlug, appVersionLabel) - if err != nil { - return fmt.Errorf("create pod for node %s: %w", node.Name, err) - } - logf("Successfully ensured manager install pod for node %s", node.Name) - } - - logf("Waiting for manager install pods to finish") - err = waitForManagerInstallPods(ctx, cli, nodeList.Items) - if err != nil { - return fmt.Errorf("wait for pods: %w", err) - } - logf("Successfully waited for manager install pods to finish") - - return nil -} - -func getOperatorImageName(ctx context.Context, cli client.Client, in *ecv1beta1.Installation) (string, error) { - if in.Spec.AirGap { - err := metadata.CopyVersionMetadataToCluster(ctx, cli, in) - if err != nil { - return "", fmt.Errorf("copy version metadata to cluster: %w", err) - } - } - - meta, err := release.MetadataFor(ctx, in, cli) - if err != nil { - return "", fmt.Errorf("get release metadata: %w", err) - } - - for _, image := range meta.Images { - if strings.Contains(image, "embedded-cluster-operator-image") { - return image, nil - } - } - return "", fmt.Errorf("no embedded-cluster-operator image found in release metadata") -} - -func ensureManagerInstallPodForNode( - ctx context.Context, cli client.Client, - node corev1.Node, in *ecv1beta1.Installation, operatorImage string, - migrationSecret string, appSlug string, appVersionLabel string, -) (string, error) { - existing, err := getManagerInstallPodForNode(ctx, cli, node) - if err == nil { - if managerInstallPodHasSucceeded(existing) { - return existing.Name, nil - } else if managerInstallPodHasFailed(existing) { - err := cli.Delete(ctx, &existing) - if err != nil { - return "", fmt.Errorf("delete pod %s: %w", existing.Name, err) - } - } else { - // still running - return existing.Name, nil - } - } else if !k8serrors.IsNotFound(err) { - return "", fmt.Errorf("get pod for node %s: %w", node.Name, err) - } - - pod := getManagerInstallPodSpecForNode(node, in, operatorImage, migrationSecret, appSlug, appVersionLabel) - if err := cli.Create(ctx, pod); err != nil { - return "", err - } - - return pod.Name, nil -} - -// deleteManagerInstallPods deletes all manager install pods on all nodes. 
-func deleteManagerInstallPods(ctx context.Context, logf LogFunc, cli client.Client) error { - logf("Deleting manager install pods") - - var nodeList corev1.NodeList - if err := cli.List(ctx, &nodeList); err != nil { - return fmt.Errorf("list nodes: %w", err) - } - - for _, node := range nodeList.Items { - podName := getManagerInstallPodName(node) - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: podNamespace, Name: podName, - }, - } - err := cli.Delete(ctx, pod, client.PropagationPolicy(metav1.DeletePropagationBackground)) - if err != nil { - return fmt.Errorf("delete pod for node %s: %w", node.Name, err) - } - } - - logf("Successfully deleted manager install pods") - - return nil -} - -func waitForManagerInstallPods(ctx context.Context, cli client.Client, nodes []corev1.Node) error { - eg := errgroup.Group{} - - for _, node := range nodes { - podName := getManagerInstallPodName(node) - eg.Go(func() error { - err := waitForManagerInstallPod(ctx, cli, podName) - if err != nil { - return fmt.Errorf("wait for pod for node %s: %v", node.Name, err) - } - return nil - }) - } - - // wait cancels - err := eg.Wait() - if err != nil { - return err - } - - return nil -} - -func waitForManagerInstallPod(ctx context.Context, cli client.Client, podName string) error { - // 60 steps at 5 second intervals = ~ 5 minutes - backoff := wait.Backoff{Steps: 60, Duration: 2 * time.Second, Factor: 1.0, Jitter: 0.1} - return kubeutils.WaitForPodComplete(ctx, cli, podNamespace, podName, &kubeutils.WaitOptions{Backoff: &backoff}) -} - -func getManagerInstallPodForNode(ctx context.Context, cli client.Client, node corev1.Node) (corev1.Pod, error) { - podName := getManagerInstallPodName(node) - - var pod corev1.Pod - err := cli.Get(ctx, client.ObjectKey{Namespace: podNamespace, Name: podName}, &pod) - if err != nil { - return pod, fmt.Errorf("get pod %s: %w", podName, err) - } - return pod, nil -} - -func managerInstallPodHasSucceeded(pod corev1.Pod) bool { - return pod.Status.Phase == corev1.PodSucceeded -} - -func managerInstallPodHasFailed(pod corev1.Pod) bool { - return pod.Status.Phase == corev1.PodFailed -} diff --git a/operator/pkg/cli/migratev2/manager_test.go b/operator/pkg/cli/migratev2/manager_test.go deleted file mode 100644 index 4f1373dcb..000000000 --- a/operator/pkg/cli/migratev2/manager_test.go +++ /dev/null @@ -1,409 +0,0 @@ -package migratev2 - -import ( - "context" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - "time" - - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func Test_runManagerInstallPodsAndWait(t *testing.T) { - // Create test nodes, one with taints - nodes := []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Spec: corev1.NodeSpec{ - Taints: []corev1.Taint{ - { - Key: "node-role.kubernetes.io/control-plane", - Effect: corev1.TaintEffectNoSchedule, - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node2", - }, - }, - } - - // Start a mock server for the metadata request - metadataServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - metadata := map[string]interface{}{ - "images": []string{ - 
"embedded-cluster-operator-image:1.0", - }, - } - json.NewEncoder(w).Encode(metadata) - })) - defer metadataServer.Close() - - // Create test installation with metadata override URL - installation := &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-install", - }, - Spec: ecv1beta1.InstallationSpec{ - Config: &ecv1beta1.ConfigSpec{ - Version: "1.0.0", - MetadataOverrideURL: metadataServer.URL, - }, - }, - } - - // Set up the test scheme - scheme := runtime.NewScheme() - require.NoError(t, corev1.AddToScheme(scheme)) - require.NoError(t, batchv1.AddToScheme(scheme)) - require.NoError(t, ecv1beta1.AddToScheme(scheme)) - - // Create fake client with nodes - cli := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(nodesToRuntimeObjects(nodes)...). - WithObjects(installation). - Build() - - // Create a context with timeout - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // Start a goroutine to simulate the pods completing successfully - go func() { - for { - time.Sleep(100 * time.Millisecond) // Give time for pods to be created - - // List the pods - var pods corev1.PodList - err := cli.List(ctx, &pods) - require.NoError(t, err) - - // Update each pod to be successful - for _, pod := range pods.Items { - // Update pod status - pod.Status.Phase = corev1.PodSucceeded - err = cli.Status().Update(ctx, &pod) - require.NoError(t, err) - } - - if len(pods.Items) == len(nodes) { - break - } - } - }() - - // Run the function - logf := func(format string, args ...any) { - // No-op logger for testing - } - - err := runManagerInstallPodsAndWait(ctx, logf, cli, installation, "test-secret", "test-app", "v1.0.0") - require.NoError(t, err) - - // Verify the pods were created and completed - var pods corev1.PodList - err = cli.List(ctx, &pods) - require.NoError(t, err) - - // Verify number of pods matches number of nodes - assert.Len(t, pods.Items, len(nodes)) - - // Verify each pod - for _, pod := range pods.Items { - // Verify pod succeeded - assert.Equal(t, corev1.PodSucceeded, pod.Status.Phase) - - // Verify pod has correct labels - assert.Equal(t, "install-v2-manager", pod.Labels["app"]) - - // Verify pod spec - assert.Equal(t, "install-v2-manager", pod.Spec.Containers[0].Name) - assert.Equal(t, corev1.RestartPolicyNever, pod.Spec.RestartPolicy) - - // Verify node affinity - if pod.Name == "install-v2-manager-node1" { - assert.Equal(t, "node1", pod.Spec.NodeSelector["kubernetes.io/hostname"]) - - // Verify tolerations are set for tainted nodes - expectedToleration := corev1.Toleration{ - Key: "node-role.kubernetes.io/control-plane", - Value: "", - Operator: corev1.TolerationOpEqual, - } - assert.Contains(t, pod.Spec.Tolerations, expectedToleration) - } else { - assert.Equal(t, "node2", pod.Spec.NodeSelector["kubernetes.io/hostname"]) - } - - // Verify volumes - foundInstallationVolume := false - foundLicenseVolume := false - for _, volume := range pod.Spec.Volumes { - if volume.Name == "installation" { - foundInstallationVolume = true - assert.Equal(t, "test-install", volume.ConfigMap.Name) - } - if volume.Name == "license" { - foundLicenseVolume = true - assert.Equal(t, "test-secret", volume.Secret.SecretName) - } - } - assert.True(t, foundInstallationVolume, "expected installation volume to be mounted") - assert.True(t, foundLicenseVolume, "expected license volume to be mounted") - - // Verify command arguments - container := pod.Spec.Containers[0] - assert.Equal(t, container.Image, "embedded-cluster-operator-image:1.0") - 
assert.Contains(t, container.Command, "migrate-v2") - assert.Contains(t, container.Command, "install-manager") - assert.Contains(t, container.Command, "--app-slug") - assert.Contains(t, container.Command, "test-app") - assert.Contains(t, container.Command, "--app-version-label") - assert.Contains(t, container.Command, "v1.0.0") - } -} - -func Test_deleteManagerInstallPods(t *testing.T) { - // Create test nodes - nodes := []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node2", - }, - }, - } - - // Create existing pods - pods := []corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "install-v2-manager-node1", - Namespace: "embedded-cluster", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "install-v2-manager-node2", - Namespace: "embedded-cluster", - }, - }, - } - - // Set up the test scheme - scheme := runtime.NewScheme() - require.NoError(t, corev1.AddToScheme(scheme)) - require.NoError(t, batchv1.AddToScheme(scheme)) - - // Create fake client with nodes and pods - cli := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(append( - nodesToRuntimeObjects(nodes), - podsToRuntimeObjects(pods)..., - )...). - Build() - - // Run the function - logf := func(format string, args ...any) { - // No-op logger for testing - } - - err := deleteManagerInstallPods(context.Background(), logf, cli) - require.NoError(t, err) - - // Verify pods were deleted - var remainingPods corev1.PodList - err = cli.List(context.Background(), &remainingPods) - require.NoError(t, err) - assert.Empty(t, remainingPods.Items, "expected all pods to be deleted") -} - -func nodesToRuntimeObjects(nodes []corev1.Node) []client.Object { - objects := make([]client.Object, len(nodes)) - for i := range nodes { - objects[i] = &nodes[i] - } - return objects -} - -func Test_ensureManagerInstallPodForNode(t *testing.T) { - // Set up common test objects - node := corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-node", - }, - Spec: corev1.NodeSpec{ - Taints: []corev1.Taint{ - { - Key: "node-role.kubernetes.io/control-plane", - Effect: corev1.TaintEffectNoSchedule, - }, - }, - }, - } - - installation := &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-install", - }, - Spec: ecv1beta1.InstallationSpec{ - Config: &ecv1beta1.ConfigSpec{ - Version: "1.0.0", - }, - }, - } - - tests := []struct { - name string - existingPod *corev1.Pod - expectNewPod bool - validatePod func(*testing.T, *corev1.Pod) - expectError bool - }{ - { - name: "creates new pod when none exists", - existingPod: nil, - validatePod: func(t *testing.T, pod *corev1.Pod) { - assert.Equal(t, "install-v2-manager-test-node", pod.Name) - assert.Equal(t, "embedded-cluster", pod.Namespace) - assert.Equal(t, "install-v2-manager", pod.Labels["app"]) - - assert.Equal(t, "test-node", pod.Spec.NodeSelector["kubernetes.io/hostname"]) - - // Verify tolerations - expectedToleration := corev1.Toleration{ - Key: "node-role.kubernetes.io/control-plane", - Value: "", - Operator: corev1.TolerationOpEqual, - } - assert.Contains(t, pod.Spec.Tolerations, expectedToleration) - - // Verify volumes - foundInstallationVolume := false - foundLicenseVolume := false - for _, volume := range pod.Spec.Volumes { - if volume.Name == "installation" { - foundInstallationVolume = true - assert.Equal(t, "test-install", volume.ConfigMap.Name) - } - if volume.Name == "license" { - foundLicenseVolume = true - assert.Equal(t, "test-secret", volume.Secret.SecretName) - } - } - 
assert.True(t, foundInstallationVolume, "expected installation volume to be mounted") - assert.True(t, foundLicenseVolume, "expected license volume to be mounted") - - // Verify container - container := pod.Spec.Containers[0] - assert.Equal(t, "embedded-cluster-operator-image:1.0", container.Image) - assert.Contains(t, container.Command, "migrate-v2") - assert.Contains(t, container.Command, "install-manager") - assert.Contains(t, container.Command, "--app-slug") - assert.Contains(t, container.Command, "test-app") - assert.Contains(t, container.Command, "--app-version-label") - assert.Contains(t, container.Command, "v1.0.0") - }, - expectError: false, - }, - { - name: "reuses existing successful pod", - existingPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "install-v2-manager-test-node", - Namespace: "embedded-cluster", - }, - Status: corev1.PodStatus{ - Phase: corev1.PodSucceeded, - }, - }, - validatePod: func(t *testing.T, pod *corev1.Pod) { - assert.Equal(t, corev1.PodSucceeded, pod.Status.Phase) - }, - expectError: false, - }, - { - name: "replaces failed pod", - existingPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "install-v2-manager-test-node", - Namespace: "embedded-cluster", - }, - Status: corev1.PodStatus{ - Phase: corev1.PodFailed, - }, - }, - validatePod: func(t *testing.T, pod *corev1.Pod) { - assert.Equal(t, corev1.PodPhase(""), pod.Status.Phase) - }, - expectError: false, - }, - } - - // Set up the test scheme - scheme := runtime.NewScheme() - require.NoError(t, corev1.AddToScheme(scheme)) - require.NoError(t, batchv1.AddToScheme(scheme)) - require.NoError(t, ecv1beta1.AddToScheme(scheme)) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - // Create fake client - builder := fake.NewClientBuilder().WithScheme(scheme) - if tt.existingPod != nil { - builder = builder.WithObjects(tt.existingPod) - } - cli := builder.Build() - - // Run the function - podName, err := ensureManagerInstallPodForNode( - context.Background(), - cli, - node, - installation, - "embedded-cluster-operator-image:1.0", - "test-secret", - "test-app", - "v1.0.0", - ) - - if tt.expectError { - require.Error(t, err) - return - } - require.NoError(t, err) - - // Get the pod - var pod corev1.Pod - err = cli.Get(context.Background(), client.ObjectKey{ - Namespace: "embedded-cluster", - Name: podName, - }, &pod) - require.NoError(t, err) - - // Run validation - tt.validatePod(t, &pod) - }) - } -} diff --git a/operator/pkg/cli/migratev2/managerpod.go b/operator/pkg/cli/migratev2/managerpod.go deleted file mode 100644 index 19070f407..000000000 --- a/operator/pkg/cli/migratev2/managerpod.go +++ /dev/null @@ -1,198 +0,0 @@ -package migratev2 - -import ( - "context" - "fmt" - - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/replicatedhq/embedded-cluster/operator/pkg/util" - "github.com/replicatedhq/embedded-cluster/pkg/manager" - "github.com/replicatedhq/embedded-cluster/pkg/runtimeconfig" - "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" -) - -var _managerInstallPodSpec = corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Pod", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: podNamespace, - Name: "install-v2-manager-DYNAMIC", - Labels: map[string]string{ - "app": "install-v2-manager", - }, - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{ - { - Name: "install-v2-manager", - 
Image: "DYNAMIC", - ImagePullPolicy: corev1.PullIfNotPresent, - SecurityContext: &corev1.SecurityContext{ - RunAsUser: ptr.To(int64(0)), - Privileged: ptr.To(true), - }, - Command: []string{ - "/manager", "migrate-v2", "install-manager", - "--installation", "/ec/installation/installation", - "--license", "/ec/license/license", - // "--app-slug", "DYNAMIC", - // "--app-version-label", "DYNAMIC", - }, - Env: []corev1.EnvVar{ - { - Name: "DBUS_SYSTEM_BUS_ADDRESS", // required to run systemctl commands - Value: "unix:path=/host/run/dbus/system_bus_socket", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "installation", // required to set runtime config - MountPath: "/ec/installation", - ReadOnly: true, - }, - { - Name: "license", // required to download the manager binary - MountPath: "/ec/license", - ReadOnly: true, - }, - { - Name: "host-run-dbus-system-bus-socket", // required to run systemctl commands - MountPath: "/host/run/dbus/system_bus_socket", - }, - { - Name: "host-etc-systemd", // required to write systemd unit files - MountPath: "/etc/systemd", - }, - { - Name: "host-data-dir", // required to materialize files - MountPath: ecv1beta1.DefaultDataDir, - }, - }, - }, - }, - Volumes: []corev1.Volume{ - // { - // Name: "installation", - // VolumeSource: corev1.VolumeSource{ - // ConfigMap: &corev1.ConfigMapVolumeSource{ - // LocalObjectReference: corev1.LocalObjectReference{ - // Name: "DYNAMIC", - // }, - // }, - // }, - // }, - // { - // Name: "license", - // VolumeSource: corev1.VolumeSource{ - // Secret: &corev1.SecretVolumeSource{ - // SecretName: "DYNAMIC", - // }, - // }, - // }, - { - Name: "host-run-dbus-system-bus-socket", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/run/dbus/system_bus_socket", - Type: ptr.To(corev1.HostPathSocket), - }, - }, - }, - { - Name: "host-etc-systemd", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/etc/systemd", - Type: ptr.To(corev1.HostPathDirectory), - }, - }, - }, - { - Name: "host-data-dir", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: ecv1beta1.DefaultDataDir, - Type: ptr.To(corev1.HostPathDirectory), - }, - }, - }, - }, - }, -} - -// InstallAndStartManager installs and starts the manager service on the host. This is run in a pod -// on all nodes in the cluster. 
-func InstallAndStartManager(ctx context.Context, licenseID string, licenseEndpoint string, appVersionLabel string) error { - binPath := runtimeconfig.PathToEmbeddedClusterBinary("manager") - - // TODO: airgap - err := manager.DownloadBinaryOnline(ctx, binPath, licenseID, licenseEndpoint, appVersionLabel) - if err != nil { - return fmt.Errorf("download manager binary: %w", err) - } - - err = manager.Install(ctx, logrus.Infof) - if err != nil { - return fmt.Errorf("install manager: %w", err) - } - - return nil -} - -func getManagerInstallPodSpecForNode( - node corev1.Node, in *ecv1beta1.Installation, operatorImage string, - migrationSecret string, appSlug string, appVersionLabel string, -) *corev1.Pod { - pod := _managerInstallPodSpec.DeepCopy() - - pod.ObjectMeta.Name = getManagerInstallPodName(node) - - // pin to a specific node - pod.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": node.Name} - - // tolerate all taints - for _, taint := range node.Spec.Taints { - pod.Spec.Tolerations = append(pod.Spec.Tolerations, corev1.Toleration{ - Key: taint.Key, - Value: taint.Value, - Operator: corev1.TolerationOpEqual, - }) - } - - pod.Spec.Containers[0].Image = operatorImage - pod.Spec.Containers[0].Command = append(pod.Spec.Containers[0].Command, - "--app-slug", appSlug, - "--app-version-label", appVersionLabel, - ) - - pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ - Name: "installation", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: in.Name, - }, - }, - }, - }) - pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ - Name: "license", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: migrationSecret, - }, - }, - }) - - return pod -} - -func getManagerInstallPodName(node corev1.Node) string { - return util.NameWithLengthLimit(podNamePrefix, node.Name) -} diff --git a/operator/pkg/cli/migratev2/migrate.go b/operator/pkg/cli/migratev2/migrate.go index 2b02f4339..b52f2f44f 100644 --- a/operator/pkg/cli/migratev2/migrate.go +++ b/operator/pkg/cli/migratev2/migrate.go @@ -5,7 +5,6 @@ import ( "fmt" ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/replicatedhq/embedded-cluster/pkg/helm" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -15,56 +14,58 @@ type LogFunc func(string, ...any) // Run runs the v1 to v2 migration. It installs the manager service on all nodes, copies the // installations to configmaps, enables the v2 admin console, and finally removes the operator // chart. 
-func Run(
-	ctx context.Context, logf LogFunc, cli client.Client, helmCLI helm.Client,
-	in *ecv1beta1.Installation,
-	migrationSecret string, appSlug string, appVersionLabel string,
-) error {
-	err := setV2MigrationInProgress(ctx, logf, cli, in)
+func Run(ctx context.Context, logf LogFunc, cli client.Client, in *ecv1beta1.Installation) (err error) {
+	ok, err := needsMigration(ctx, cli)
 	if err != nil {
-		return fmt.Errorf("set v2 migration in progress: %w", err)
+		return fmt.Errorf("check if migration is needed: %w", err)
 	}
-
-	err = waitForInstallationStateInstalled(ctx, logf, cli, in)
-	if err != nil {
-		return fmt.Errorf("failed to wait for addon installation: %w", err)
+	if !ok {
+		logf("No v2 migration needed")
+		return nil
 	}
 
-	err = runManagerInstallPodsAndWait(ctx, logf, cli, in, migrationSecret, appSlug, appVersionLabel)
-	if err != nil {
-		return fmt.Errorf("run manager install pods: %w", err)
-	}
-
-	err = deleteManagerInstallPods(ctx, logf, cli)
-	if err != nil {
-		return fmt.Errorf("delete pods: %w", err)
-	}
+	logf("Running v2 migration")
 
-	err = copyInstallationsToConfigMaps(ctx, logf, cli)
+	err = setV2MigrationInProgress(ctx, logf, cli, in)
 	if err != nil {
-		return fmt.Errorf("copy installations to config maps: %w", err)
+		return fmt.Errorf("set v2 migration in progress: %w", err)
 	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		if err := setV2MigrationFailed(ctx, logf, cli, in, err); err != nil {
+			logf("Failed to set v2 migration failed: %v", err)
+		}
+	}()
 
-	// disable the operator to ensure that it does not reconcile and revert our changes.
-	err = disableOperator(ctx, logf, cli, in)
+	// scale down the operator to ensure that it does not reconcile and revert our changes.
+	err = scaleDownOperator(ctx, logf, cli)
 	if err != nil {
 		return fmt.Errorf("disable operator: %w", err)
 	}
 
-	err = enableV2AdminConsole(ctx, logf, cli, in)
+	err = cleanupK0sCharts(ctx, logf, cli)
 	if err != nil {
-		return fmt.Errorf("enable v2 admin console: %w", err)
+		return fmt.Errorf("cleanup k0s: %w", err)
 	}
 
-	err = ensureInstallationStateInstalled(ctx, logf, cli, in)
+	err = setV2MigrationComplete(ctx, logf, cli, in)
 	if err != nil {
-		return fmt.Errorf("set installation state to installed: %w", err)
+		return fmt.Errorf("set v2 migration complete: %w", err)
 	}
 
-	err = cleanupV1(ctx, logf, cli)
+	logf("Successfully migrated to v2")
+
+	return nil
+}
+
+// needsMigration checks if the installation needs to be migrated to v2.
+func needsMigration(ctx context.Context, cli client.Client) (bool, error) { + ok, err := needsK0sChartCleanup(ctx, cli) if err != nil { - return fmt.Errorf("cleanup v1: %w", err) + return false, fmt.Errorf("check if k0s charts need cleanup: %w", err) } - return nil + return ok, nil } diff --git a/operator/pkg/cli/migratev2/operator.go b/operator/pkg/cli/migratev2/operator.go index 60b9afc80..0151cfcec 100644 --- a/operator/pkg/cli/migratev2/operator.go +++ b/operator/pkg/cli/migratev2/operator.go @@ -5,126 +5,91 @@ import ( "fmt" "time" - k0shelmv1beta1 "github.com/k0sproject/k0s/pkg/apis/helm/v1beta1" - k0sv1beta1 "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" - "github.com/replicatedhq/embedded-cluster/pkg/helm" - "github.com/replicatedhq/embedded-cluster/pkg/helpers" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apitypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" ) -// disableOperator sets the DisablingReconcile condition to true on the installation object which -// will prevent the operator from reconciling the installation. -func disableOperator(ctx context.Context, logf LogFunc, cli client.Client, in *ecv1beta1.Installation) error { - logf("Disabling operator") - - err := setInstallationCondition(ctx, cli, in, metav1.Condition{ - Type: ecv1beta1.ConditionTypeDisableReconcile, - Status: metav1.ConditionTrue, - Reason: "V2MigrationInProgress", - }) - if err != nil { - return fmt.Errorf("set disable reconcile condition: %w", err) - } +const ( + OperatorNamespace = "embedded-cluster" + OperatorDeploymentName = "embedded-cluster-operator" +) - logf("Successfully disabled operator") - return nil -} +// scaleDownOperator scales down the operator deployment to 0 replicas to prevent the operator from +// reconciling the installation. +func scaleDownOperator(ctx context.Context, logf LogFunc, cli client.Client) error { + logf("Scaling down operator") -// cleanupV1 removes control of the Helm Charts from the k0s controller and uninstalls the Embedded -// Cluster operator. -func cleanupV1(ctx context.Context, logf LogFunc, cli client.Client) error { - logf("Force deleting Chart custom resources") - // forceDeleteChartCRs is necessary because the k0s controller will otherwise uninstall the - // Helm releases and we don't want that. 
- err := forceDeleteChartCRs(ctx, cli) + err := setOperatorDeploymentReplicasZero(ctx, cli) if err != nil { - return fmt.Errorf("delete chart custom resources: %w", err) + return fmt.Errorf("set operator deployment replicas to 0: %w", err) } - logf("Successfully force deleted Chart custom resources") - logf("Removing Helm Charts from ClusterConfig") - err = removeClusterConfigHelmExtensions(ctx, cli) + logf("Waiting for operator to scale down") + + err = waitForOperatorDeployment(ctx, cli) if err != nil { - return fmt.Errorf("cleanup cluster config: %w", err) + return fmt.Errorf("wait for operator deployment: %w", err) } - logf("Successfully removed Helm Charts from ClusterConfig") + logf("Successfully scaled down operator") return nil } -func forceDeleteChartCRs(ctx context.Context, cli client.Client) error { - var chartList k0shelmv1beta1.ChartList - err := cli.List(ctx, &chartList) - if err != nil { - return fmt.Errorf("list charts: %w", err) +func setOperatorDeploymentReplicasZero(ctx context.Context, cli client.Client) error { + obj := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: OperatorDeploymentName, + Namespace: OperatorNamespace, + }, } - for _, chart := range chartList.Items { - chart.ObjectMeta.Finalizers = []string{} - err := cli.Update(ctx, &chart) - if err != nil { - return fmt.Errorf("update chart: %w", err) - } + patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, 0)) + err := cli.Patch(ctx, obj, client.RawPatch(apitypes.MergePatchType, patch)) + if err != nil { + return fmt.Errorf("patch deployment: %w", err) } + return nil +} - // wait for all finalizers to be removed before deleting the charts - for hasFinalizers := true; hasFinalizers; { - err = cli.List(ctx, &chartList) - if err != nil { - return fmt.Errorf("list charts: %w", err) - } - - hasFinalizers = false - for _, chart := range chartList.Items { - if len(chart.GetFinalizers()) > 0 { - hasFinalizers = true - break +// waitForOperatorDeployment waits for the operator deployment to be updated. +func waitForOperatorDeployment(ctx context.Context, cli client.Client) error { + backoff := wait.Backoff{Steps: 60, Duration: 5 * time.Second, Factor: 1.0, Jitter: 0.1} + var lasterr error + if err := wait.ExponentialBackoffWithContext( + ctx, backoff, func(ctx context.Context) (bool, error) { + ready, err := isOperatorDeploymentUpdated(ctx, cli) + if err != nil { + lasterr = fmt.Errorf("check deployment: %w", err) + return false, nil } + return ready, nil + }, + ); err != nil { + if lasterr != nil { + return lasterr } - - time.Sleep(100 * time.Millisecond) + return err } - - for _, chart := range chartList.Items { - err := cli.Delete(ctx, &chart, client.GracePeriodSeconds(0)) - if err != nil { - return fmt.Errorf("delete chart: %w", err) - } - } - return nil } -func removeClusterConfigHelmExtensions(ctx context.Context, cli client.Client) error { - var clusterConfig k0sv1beta1.ClusterConfig - err := cli.Get(ctx, apitypes.NamespacedName{Namespace: "kube-system", Name: "k0s"}, &clusterConfig) - if err != nil { - return fmt.Errorf("get cluster config: %w", err) - } - - clusterConfig.Spec.Extensions.Helm = &k0sv1beta1.HelmExtensions{} - - unstructured, err := helpers.K0sClusterConfigTo129Compat(&clusterConfig) - if err != nil { - return fmt.Errorf("convert cluster config to 1.29 compat: %w", err) - } - - err = cli.Update(ctx, unstructured) +// isOperatorDeploymentUpdated checks that the operator pods are removed. 
+func isOperatorDeploymentUpdated(ctx context.Context, cli client.Client) (bool, error) { + var podList corev1.PodList + err := cli.List(ctx, &podList, client.InNamespace(OperatorNamespace), client.MatchingLabels(operatorPodLabels())) if err != nil { - return fmt.Errorf("update cluster config: %w", err) + return false, fmt.Errorf("list embedded-cluster-operator pods: %w", err) } - - return nil + return len(podList.Items) == 0, nil } -func helmUninstallOperator(ctx context.Context, helmCLI helm.Client) error { - return helmCLI.Uninstall(ctx, helm.UninstallOptions{ - ReleaseName: "embedded-cluster-operator", - Namespace: "embedded-cluster", - Wait: true, - IgnoreNotFound: true, - }) +func operatorPodLabels() map[string]string { + return map[string]string{ + "app.kubernetes.io/instance": "embedded-cluster-operator", + "app.kubernetes.io/name": "embedded-cluster-operator", + } } diff --git a/operator/pkg/cli/migratev2/operator_test.go b/operator/pkg/cli/migratev2/operator_test.go index b00edf773..f5cf4e80c 100644 --- a/operator/pkg/cli/migratev2/operator_test.go +++ b/operator/pkg/cli/migratev2/operator_test.go @@ -4,50 +4,80 @@ import ( "context" "testing" - k0shelmv1beta1 "github.com/k0sproject/k0s/pkg/apis/helm/v1beta1" - k0sv1beta1 "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" - ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/meta" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apitypes "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func Test_disableOperator(t *testing.T) { +func Test_setOperatorDeploymentReplicasZero(t *testing.T) { scheme := runtime.NewScheme() - require.NoError(t, ecv1beta1.AddToScheme(scheme)) + require.NoError(t, appsv1.AddToScheme(scheme)) tests := []struct { - name string - installation *ecv1beta1.Installation - expectError bool + name string + deployment *appsv1.Deployment + expectError bool + validate func(t *testing.T, deployment appsv1.Deployment) }{ { - name: "disables operator reconciliation", - installation: &ecv1beta1.Installation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-installation", + name: "scales down operator", + deployment: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: OperatorDeploymentName, Namespace: OperatorNamespace}, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](1), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "embedded-cluster-operator", + Image: "replicated-operator:v1", + }, + }, + }, + }, }, }, + validate: func(t *testing.T, deployment appsv1.Deployment) { + // Validate that the patch did not modify the rest of the deployment + assert.Equal(t, 1, len(deployment.Spec.Template.Spec.Containers), "expected 1 container") + }, expectError: false, }, + { + name: "scales down operator 0 replicas", + deployment: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: OperatorDeploymentName, Namespace: OperatorNamespace}, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](0), + }, + }, + expectError: false, + }, + { + name: "fails if operator is not found", + expectError: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Create a fake client with the test case's initial installation - cli := 
fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(tt.installation). - WithStatusSubresource(tt.installation). - Build() + builder := fake.NewClientBuilder(). + WithScheme(scheme) + if tt.deployment != nil { + builder = builder.WithObjects(tt.deployment) + } + cli := builder.Build() // Call disableOperator - err := disableOperator(context.Background(), t.Logf, cli, tt.installation) + err := setOperatorDeploymentReplicasZero(context.Background(), cli) // Check error expectation if tt.expectError { @@ -57,170 +87,93 @@ func Test_disableOperator(t *testing.T) { require.NoError(t, err) // Verify the installation status was updated - var updatedInstallation ecv1beta1.Installation - err = cli.Get(context.Background(), client.ObjectKey{Name: tt.installation.Name}, &updatedInstallation) + var updatedDeployment appsv1.Deployment + nsn := apitypes.NamespacedName{Namespace: OperatorNamespace, Name: OperatorDeploymentName} + err = cli.Get(context.Background(), nsn, &updatedDeployment) require.NoError(t, err) - // Check that the DisableReconcile condition was set correctly - condition := meta.FindStatusCondition(updatedInstallation.Status.Conditions, ecv1beta1.ConditionTypeDisableReconcile) - require.NotNil(t, condition) - assert.Equal(t, metav1.ConditionTrue, condition.Status) - assert.Equal(t, "V2MigrationInProgress", condition.Reason) - assert.Equal(t, tt.installation.Generation, condition.ObservedGeneration) + // Check that replicas has been scaled down + assert.Equal(t, int32(0), *updatedDeployment.Spec.Replicas, "expected replicas to be 0") + + if tt.validate != nil { + tt.validate(t, updatedDeployment) + } }) } } -func Test_forceDeleteChartCRs(t *testing.T) { - scheme := runtime.NewScheme() - require.NoError(t, k0sv1beta1.AddToScheme(scheme)) - require.NoError(t, k0shelmv1beta1.AddToScheme(scheme)) - +func Test_isOperatorDeploymentUpdated(t *testing.T) { tests := []struct { - name string - initialCRs []k0shelmv1beta1.Chart - expectError bool + name string + pods []corev1.Pod + want bool + wantErr bool }{ { - name: "removes finalizers and deletes charts", - initialCRs: []k0shelmv1beta1.Chart{ + name: "pod exists", + pods: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "chart1", - Namespace: "default", - Finalizers: []string{"helm.k0s.k0sproject.io/uninstall-helm-release"}, + Name: "embedded-cluster-operator-123", + Namespace: OperatorNamespace, + Labels: operatorPodLabels(), }, }, { ObjectMeta: metav1.ObjectMeta{ - Name: "chart2", - Namespace: "default", - Finalizers: []string{"helm.k0s.k0sproject.io/uninstall-helm-release"}, + Name: "some-other-pod", + Namespace: OperatorNamespace, }, }, }, - expectError: false, + want: false, + wantErr: false, }, { - name: "handles no charts", - initialCRs: []k0shelmv1beta1.Chart{}, - expectError: false, - }, - { - name: "handles charts without finalizers", - initialCRs: []k0shelmv1beta1.Chart{ + name: "pod does not exist", + pods: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ - Name: "chart1", - Namespace: "default", + Name: "some-other-pod", + Namespace: OperatorNamespace, }, }, }, - expectError: false, + want: true, + wantErr: false, + }, + { + name: "no pods found", + pods: []corev1.Pod{}, + want: true, + wantErr: false, }, } + scheme := runtime.NewScheme() + require.NoError(t, corev1.AddToScheme(scheme)) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Create a fake client with the test case's initial CRs cli := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(chartCRsToRuntimeObjects(tt.initialCRs)...). 
+ WithObjects(podsToRuntimeObjects(tt.pods)...). Build() - // Call forceDeleteChartCRs - err := forceDeleteChartCRs(context.Background(), cli) - - // Check error expectation - if tt.expectError { + got, err := isOperatorDeploymentUpdated(context.Background(), cli) + if tt.wantErr { require.Error(t, err) return } require.NoError(t, err) - - // Verify all charts were deleted - var remainingCharts k0shelmv1beta1.ChartList - err = cli.List(context.Background(), &remainingCharts) - require.NoError(t, err) - assert.Empty(t, remainingCharts.Items, "expected all charts to be deleted") + assert.Equal(t, tt.want, got) }) } } -// Helper function to convert []Chart to []runtime.Object -func chartCRsToRuntimeObjects(charts []k0shelmv1beta1.Chart) []client.Object { - objects := make([]client.Object, len(charts)) - for i := range charts { - objects[i] = &charts[i] +func podsToRuntimeObjects(pods []corev1.Pod) []client.Object { + objects := make([]client.Object, len(pods)) + for i := range pods { + objects[i] = &pods[i] } return objects } - -func Test_removeClusterConfigHelmExtensions(t *testing.T) { - tests := []struct { - name string - initialConfig *k0sv1beta1.ClusterConfig - expectError bool - }{ - { - name: "cleans up helm extensions", - initialConfig: &k0sv1beta1.ClusterConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "k0s", - Namespace: "kube-system", - }, - Spec: &k0sv1beta1.ClusterSpec{ - Extensions: &k0sv1beta1.ClusterExtensions{ - Helm: &k0sv1beta1.HelmExtensions{ - Charts: k0sv1beta1.ChartsSettings{ - {Name: "chart1"}, - {Name: "chart2"}, - }, - }, - }, - }, - }, - expectError: false, - }, - { - name: "handles nil extensions", - initialConfig: &k0sv1beta1.ClusterConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "k0s", - Namespace: "kube-system", - }, - Spec: &k0sv1beta1.ClusterSpec{}, - }, - expectError: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scheme := runtime.NewScheme() - require.NoError(t, k0sv1beta1.AddToScheme(scheme)) - - cli := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(tt.initialConfig). 
- Build() - - err := removeClusterConfigHelmExtensions(context.Background(), cli) - - if tt.expectError { - require.Error(t, err) - return - } - require.NoError(t, err) - - var updatedConfig k0sv1beta1.ClusterConfig - err = cli.Get(context.Background(), apitypes.NamespacedName{ - Namespace: "kube-system", - Name: "k0s", - }, &updatedConfig) - require.NoError(t, err) - - assert.Equal(t, &k0sv1beta1.HelmExtensions{}, updatedConfig.Spec.Extensions.Helm) - }) - } -} diff --git a/operator/pkg/cli/upgrade.go b/operator/pkg/cli/upgrade.go index c09c2f17b..907003ba4 100644 --- a/operator/pkg/cli/upgrade.go +++ b/operator/pkg/cli/upgrade.go @@ -9,7 +9,6 @@ import ( "github.com/replicatedhq/embedded-cluster/operator/pkg/k8sutil" "github.com/replicatedhq/embedded-cluster/operator/pkg/upgrade" "github.com/replicatedhq/embedded-cluster/pkg/kubeutils" - "github.com/replicatedhq/embedded-cluster/pkg/manager" "github.com/replicatedhq/embedded-cluster/pkg/runtimeconfig" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/runtime" @@ -23,9 +22,6 @@ func UpgradeCmd() *cobra.Command { var installation *ecv1beta1.Installation - var migrateV2 bool - var migrateV2Secret, migrateV2AppSlug, migrateV2AppVersionLabel string - cmd := &cobra.Command{ Use: "upgrade", Short: "create a job to upgrade the embedded cluster operator", @@ -40,20 +36,6 @@ func UpgradeCmd() *cobra.Command { // set the runtime config from the installation spec runtimeconfig.Set(installation.Spec.RuntimeConfig) - if migrateV2 { - if migrateV2Secret == "" { - return fmt.Errorf("--migrate-v2 is set to true but --migrate-v2-secret is not set") - } - if migrateV2AppSlug == "" { - return fmt.Errorf("--migrate-v2 is set to true but --app-slug is not set") - } - if migrateV2AppVersionLabel == "" { - return fmt.Errorf("--migrate-v2 is set to true but --app-version-label is not set") - } - - manager.SetServiceName(migrateV2AppSlug) - } - return nil }, RunE: func(cmd *cobra.Command, args []string) error { @@ -79,7 +61,6 @@ func UpgradeCmd() *cobra.Command { err = upgrade.CreateUpgradeJob( cmd.Context(), cli, installation, localArtifactMirrorImage, previousInstallation.Spec.Config.Version, - migrateV2, migrateV2Secret, migrateV2AppSlug, migrateV2AppVersionLabel, ) if err != nil { return fmt.Errorf("failed to upgrade: %w", err) @@ -100,11 +81,6 @@ func UpgradeCmd() *cobra.Command { panic(err) } - cmd.Flags().BoolVar(&migrateV2, "migrate-v2", false, "Set to true to run the v2 migration") - cmd.Flags().StringVar(&migrateV2Secret, "migrate-v2-secret", "", "The secret name from which to read the license (required if --migrate-v2 is set to true)") - cmd.Flags().StringVar(&migrateV2AppSlug, "app-slug", "", "The application slug (required if --migrate-v2 is set to true)") - cmd.Flags().StringVar(&migrateV2AppVersionLabel, "app-version-label", "", "The application version label (required if --migrate-v2 is set to true)") - return cmd } diff --git a/operator/pkg/cli/upgrade_job.go b/operator/pkg/cli/upgrade_job.go index e590ab749..30762b61e 100644 --- a/operator/pkg/cli/upgrade_job.go +++ b/operator/pkg/cli/upgrade_job.go @@ -2,12 +2,13 @@ package cli import ( "fmt" + "os" "time" ecv1beta1 "github.com/replicatedhq/embedded-cluster/kinds/apis/v1beta1" + "github.com/replicatedhq/embedded-cluster/operator/pkg/cli/migratev2" "github.com/replicatedhq/embedded-cluster/operator/pkg/k8sutil" "github.com/replicatedhq/embedded-cluster/operator/pkg/upgrade" - "github.com/replicatedhq/embedded-cluster/pkg/manager" "github.com/replicatedhq/embedded-cluster/pkg/runtimeconfig" 
"github.com/replicatedhq/embedded-cluster/pkg/versions" "github.com/spf13/cobra" @@ -19,9 +20,6 @@ func UpgradeJobCmd() *cobra.Command { var installationFile, previousInstallationVersion string var installation *ecv1beta1.Installation - var migrateV2 bool - var migrateV2Secret, appSlug, appVersionLabel string - cmd := &cobra.Command{ Use: "upgrade-job", Short: "Upgrade k0s and then all addons from within a job that may be restarted", @@ -36,20 +34,6 @@ func UpgradeJobCmd() *cobra.Command { // set the runtime config from the installation spec runtimeconfig.Set(installation.Spec.RuntimeConfig) - if migrateV2 { - if migrateV2Secret == "" { - return fmt.Errorf("--migrate-v2 is set to true but --migrate-v2-secret is not set") - } - if appSlug == "" { - return fmt.Errorf("--migrate-v2 is set to true but --app-slug is not set") - } - if appVersionLabel == "" { - return fmt.Errorf("--migrate-v2 is set to true but --app-version-label is not set") - } - - manager.SetServiceName(appSlug) - } - return nil }, RunE: func(cmd *cobra.Command, args []string) error { @@ -67,6 +51,17 @@ func UpgradeJobCmd() *cobra.Command { i := 0 sleepDuration := time.Second * 5 for { + logf := func(format string, args ...any) { + fmt.Println(fmt.Sprintf(format, args...)) + } + + if os.Getenv("MIGRATE_V2") == "true" { + err := migratev2.Run(ctx, logf, cli, installation) + if err != nil { + return fmt.Errorf("failed to run v2 migration: %w", err) + } + } + err = upgrade.Upgrade(ctx, cli, installation) if err != nil { fmt.Printf("Upgrade failed, retrying: %s\n", err.Error()) @@ -83,17 +78,6 @@ func UpgradeJobCmd() *cobra.Command { fmt.Println("Upgrade completed successfully") - if migrateV2 { - logf := func(format string, args ...any) { - fmt.Println(fmt.Sprintf(format, args...)) - } - - err = runMigrateV2PodAndWait(ctx, logf, cli, installation, migrateV2Secret, appSlug, appVersionLabel) - if err != nil { - return fmt.Errorf("failed to run v2 migration: %w", err) - } - } - return nil }, } @@ -109,10 +93,5 @@ func UpgradeJobCmd() *cobra.Command { panic(err) } - cmd.Flags().BoolVar(&migrateV2, "migrate-v2", false, "Set to true to run the v2 migration") - cmd.Flags().StringVar(&migrateV2Secret, "migrate-v2-secret", "", "The secret name from which to read the license (required if --migrate-v2 is set to true)") - cmd.Flags().StringVar(&appSlug, "app-slug", "", "The application slug (required if --migrate-v2 is set to true)") - cmd.Flags().StringVar(&appVersionLabel, "app-version-label", "", "The application version label (required if --migrate-v2 is set to true)") - return cmd } diff --git a/operator/pkg/upgrade/job.go b/operator/pkg/upgrade/job.go index 188769df4..0757a174f 100644 --- a/operator/pkg/upgrade/job.go +++ b/operator/pkg/upgrade/job.go @@ -34,7 +34,6 @@ import ( func CreateUpgradeJob( ctx context.Context, cli client.Client, in *clusterv1beta1.Installation, localArtifactMirrorImage string, previousInstallVersion string, - migrateV2 bool, migrateV2Secret string, appSlug string, appVersionLabel string, ) error { // check if the job already exists - if it does, we've already rolled out images and can return now job := &batchv1.Job{} @@ -200,15 +199,6 @@ func CreateUpgradeJob( }, } - if migrateV2 { - job.Spec.Template.Spec.Containers[0].Command = append(job.Spec.Template.Spec.Containers[0].Command, - "--migrate-v2", - "--migrate-v2-secret", migrateV2Secret, - "--app-slug", appSlug, - "--app-version-label", appVersionLabel, - ) - } - if err = cli.Create(ctx, job); err != nil { return fmt.Errorf("failed to create upgrade 
job: %w", err) }