EVEREST-682: copy credentials during db provisioning (#217)
Michal Kralik authored Dec 21, 2023
1 parent b47543f commit 4d89ac0
Showing 5 changed files with 157 additions and 23 deletions.
109 changes: 97 additions & 12 deletions controllers/databasecluster_controller.go
@@ -208,6 +208,13 @@ func (r *DatabaseClusterReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
}

if database.Spec.DataSource != nil && database.Spec.DataSource.DBClusterBackupName != "" {
// We don't handle database.Spec.DataSource.BackupSource in the operator
if err := r.copyCredentialsFromDBBackup(ctx, database.Spec.DataSource.DBClusterBackupName, database); err != nil {
return reconcile.Result{}, err
}
}

if database.Spec.Engine.Type == everestv1alpha1.DatabaseEnginePXC {
err := r.reconcilePXC(ctx, req, database)
return reconcile.Result{}, err
@@ -226,6 +233,60 @@ func (r *DatabaseClusterReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return reconcile.Result{}, nil
}

// copyCredentialsFromDBBackup copies the credentials secret of the old DB cluster, identified
// by the given DB backup name, to the new DB cluster that is about to be provisioned.
func (r *DatabaseClusterReconciler) copyCredentialsFromDBBackup(
ctx context.Context, dbBackupName string, db *everestv1alpha1.DatabaseCluster,
) error {
logger := log.FromContext(ctx)

dbb := &everestv1alpha1.DatabaseClusterBackup{}
err := r.Get(ctx, types.NamespacedName{
Name: dbBackupName,
Namespace: db.Namespace,
}, dbb)
if err != nil {
return errors.Join(err, errors.New("could not get DB backup to copy credentials from old DB cluster"))
}

newSecretName := fmt.Sprintf("everest-secrets-%s", db.Name)
newSecret := &corev1.Secret{}
err = r.Get(ctx, types.NamespacedName{
Name: newSecretName,
Namespace: db.Namespace,
}, newSecret)
if err != nil && !k8serrors.IsNotFound(err) {
return errors.Join(err, errors.New("could not get secret to copy credentials from old DB cluster"))
}

if err == nil {
logger.Info(fmt.Sprintf("Secret %s already exists. Skipping secret copy during provisioning", newSecretName))
return nil
}

prevSecretName := fmt.Sprintf("everest-secrets-%s", dbb.Spec.DBClusterName)
secret := &corev1.Secret{}
err = r.Get(ctx, types.NamespacedName{
Name: prevSecretName,
Namespace: db.Namespace,
}, secret)
if err != nil {
return errors.Join(err, errors.New("could not get secret to copy credentials from old DB cluster"))
}

secret.ObjectMeta = metav1.ObjectMeta{
Name: newSecretName,
Namespace: secret.Namespace,
}
if err := r.createOrUpdate(ctx, secret, false); err != nil {
return errors.Join(err, errors.New("could not create new secret to copy credentials from old DB cluster"))
}

logger.Info(fmt.Sprintf("Copied secret %s to %s", prevSecretName, newSecretName))

return nil
}
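
Aside (not part of this diff): the copy boils down to re-creating the source cluster's everest-secrets-<name> Secret under the new cluster's name. A minimal standalone sketch using a plain controller-runtime client; the helper name cloneClusterSecret and the use of Create instead of the reconciler's createOrUpdate are illustrative assumptions, not code from this commit:

package sketch

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// cloneClusterSecret copies everest-secrets-<oldCluster> to everest-secrets-<newCluster>
// in the given namespace. Illustrative only; names are hypothetical.
func cloneClusterSecret(ctx context.Context, c client.Client, namespace, oldCluster, newCluster string) error {
	src := &corev1.Secret{}
	if err := c.Get(ctx, types.NamespacedName{
		Name:      fmt.Sprintf("everest-secrets-%s", oldCluster),
		Namespace: namespace,
	}, src); err != nil {
		return err
	}

	dst := src.DeepCopy()
	// Reset metadata so the API server accepts the copy as a brand-new object.
	dst.ObjectMeta = metav1.ObjectMeta{
		Name:      fmt.Sprintf("everest-secrets-%s", newCluster),
		Namespace: namespace,
	}
	return c.Create(ctx, dst)
}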

func (r *DatabaseClusterReconciler) getClusterType(ctx context.Context) (ClusterType, error) {
clusterType := ClusterTypeMinikube
unstructuredResource := &unstructured.Unstructured{}
@@ -2108,6 +2169,7 @@ func (r *DatabaseClusterReconciler) genPGDataSourceSpec(ctx context.Context, dat

var backupBaseName string
var backupStorageName string
var dest string
if database.Spec.DataSource.DBClusterBackupName != "" {
dbClusterBackup := &everestv1alpha1.DatabaseClusterBackup{}
err := r.Get(ctx, types.NamespacedName{Name: database.Spec.DataSource.DBClusterBackupName, Namespace: database.Namespace}, dbClusterBackup)
@@ -2121,18 +2183,30 @@ func (r *DatabaseClusterReconciler) genPGDataSourceSpec(ctx context.Context, dat

backupBaseName = filepath.Base(*dbClusterBackup.Status.Destination)
backupStorageName = dbClusterBackup.Spec.BackupStorageName
dest = *dbClusterBackup.Status.Destination
}

if database.Spec.DataSource.BackupSource != nil {
backupBaseName = filepath.Base(database.Spec.DataSource.BackupSource.Path)
backupStorageName = database.Spec.DataSource.BackupSource.BackupStorageName
}

backupStorage := &everestv1alpha1.BackupStorage{}
err := r.Get(ctx, types.NamespacedName{Name: backupStorageName, Namespace: r.defaultNamespace}, backupStorage)
if err != nil {
return nil, errors.Join(err, fmt.Errorf("failed to get backup storage %s", backupStorageName))
}
if database.Namespace != r.defaultNamespace {
if err := r.reconcileBackupStorageSecret(ctx, backupStorage, database); err != nil {
return nil, err
}
}

repoName := "repo1"
pgDataSource := &crunchyv1beta1.DataSource{
PGBackRest: &crunchyv1beta1.PGBackRestDataSource{
Global: map[string]string{
fmt.Sprintf(pgBackRestPathTmpl, repoName): "/" + backupStoragePrefix(database),
fmt.Sprintf(pgBackRestPathTmpl, repoName): globalDatasourceDestination(dest, database, backupStorage),
},
Stanza: "db",
Options: []string{
@@ -2149,17 +2223,6 @@ func (r *DatabaseClusterReconciler) genPGDataSourceSpec(ctx context.Context, dat
},
}

backupStorage := &everestv1alpha1.BackupStorage{}
err := r.Get(ctx, types.NamespacedName{Name: backupStorageName, Namespace: r.defaultNamespace}, backupStorage)
if err != nil {
return nil, errors.Join(err, fmt.Errorf("failed to get backup storage %s", backupStorageName))
}
if database.Namespace != r.defaultNamespace {
if err := r.reconcileBackupStorageSecret(ctx, backupStorage, database); err != nil {
return nil, err
}
}

switch backupStorage.Spec.Type {
case everestv1alpha1.BackupStorageTypeS3:
pgBackRestSecretIni, err := ini.Load([]byte{})
@@ -2272,6 +2335,28 @@ func (r *DatabaseClusterReconciler) genPGDataSourceSpec(ctx context.Context, dat
return pgDataSource, nil
}

func globalDatasourceDestination(dest string, db *everestv1alpha1.DatabaseCluster, backupStorage *everestv1alpha1.BackupStorage) string {
if dest == "" {
dest = "/" + backupStoragePrefix(db)
} else {
// Extract the relevant prefix from the backup destination
switch backupStorage.Spec.Type {
case everestv1alpha1.BackupStorageTypeS3:
dest = strings.TrimPrefix(dest, "s3://")
case everestv1alpha1.BackupStorageTypeAzure:
dest = strings.TrimPrefix(dest, "azure://")
}

dest = strings.TrimPrefix(dest, backupStorage.Spec.Bucket)
dest = strings.TrimLeft(dest, "/")
prefix := backupStoragePrefix(db)
prefixCount := len(strings.Split(prefix, "/"))
dest = "/" + strings.Join(strings.SplitN(dest, "/", prefixCount+1)[0:prefixCount], "/")
}

return dest
}

//nolint:gocognit,maintidx,gocyclo,cyclop
func (r *DatabaseClusterReconciler) reconcilePG(ctx context.Context, req ctrl.Request, database *everestv1alpha1.DatabaseCluster) error {
version, err := r.getOperatorVersion(ctx, types.NamespacedName{
53 changes: 53 additions & 0 deletions controllers/databasecluster_controller_test.go
@@ -2662,3 +2662,56 @@ func TestReconcilePGBackRestReposEmpty(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, expRepos, repos)
}

func Test_globalDatasourceDestination(t *testing.T) {
t.Parallel()

t.Run("empty dest", func(t *testing.T) {
t.Parallel()

db := &everestv1alpha1.DatabaseCluster{}
db.Name = "db-name"
db.UID = "db-uid"

bs := &everestv1alpha1.BackupStorage{}

dest := globalDatasourceDestination("", db, bs)
assert.Equal(t, "/"+backupStoragePrefix(db), dest)
})

t.Run("not-empty dest s3", func(t *testing.T) {
t.Parallel()

db := &everestv1alpha1.DatabaseCluster{}
db.Name = "db-name"
db.UID = "db-uid"

bs := &everestv1alpha1.BackupStorage{
Spec: everestv1alpha1.BackupStorageSpec{
Type: everestv1alpha1.BackupStorageTypeS3,
Bucket: "some/bucket/here",
},
}

dest := globalDatasourceDestination("s3://some/bucket/here/db-name/db-uid/some/folders/later", db, bs)
assert.Equal(t, "/db-name/db-uid", dest)
})

t.Run("not-empty dest azure", func(t *testing.T) {
t.Parallel()

db := &everestv1alpha1.DatabaseCluster{}
db.Name = "db-name"
db.UID = "db-uid"

bs := &everestv1alpha1.BackupStorage{
Spec: everestv1alpha1.BackupStorageSpec{
Type: everestv1alpha1.BackupStorageTypeAzure,
Bucket: "some/bucket/here",
},
}

dest := globalDatasourceDestination("azure://some/bucket/here/db-name/db-uid/some/folders/later", db, bs)
assert.Equal(t, "/db-name/db-uid", dest)
})
}
1 change: 1 addition & 0 deletions e2e-tests/tests/features/dbbackup_pg/20-dbb.yaml
@@ -1,5 +1,6 @@
apiVersion: kuttl.dev/v1
kind: TestStep
timeout: 600
---
apiVersion: v1
kind: Secret
11 changes: 0 additions & 11 deletions e2e-tests/tests/features/dbbackup_pxc/81-delete-restore-pitr.yaml

This file was deleted.

6 changes: 6 additions & 0 deletions e2e-tests/tests/features/dbbackup_pxc/81-delete-restore.yaml
@@ -0,0 +1,6 @@
apiVersion: kuttl.dev/v1
kind: TestStep
timeout: 10
commands:
- command: kubectl -n "${NAMESPACE}" delete dbr --all
- command: kubectl -n "${NAMESPACE}" delete pxc-restore --all
