diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx
index 8bf9a23385c..79ac60447e8 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx
@@ -22,6 +22,7 @@ Below you will find a description of the defined resources:
- [AffinityConfiguration](#AffinityConfiguration)
+- [AzureCredentials](#AzureCredentials)
- [Backup](#Backup)
- [BackupConfiguration](#BackupConfiguration)
- [BackupList](#BackupList)
@@ -77,6 +78,21 @@ Name | Description
`additionalPodAntiAffinity` | AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. | *corev1.PodAntiAffinity
`additionalPodAffinity ` | AdditionalPodAffinity allows to specify pod affinity terms to be passed to all the cluster's pods. | *corev1.PodAffinity
+
+
+## AzureCredentials
+
+AzureCredentials is the type for the credentials to be used to upload files to Azure Blob Storage. The connection string contains all the needed information. If the connection string is not specified, you will need the storage account name and one (and only one) of:
+
+- `storageKey`
+- `storageSasToken`
+
+Name | Description | Type
+---------------- | --------------------------------------------------------------------------------- | ----------------------------------------
+`connectionString` | The connection string to be used | [*SecretKeySelector](#SecretKeySelector)
+`storageAccount ` | The storage account where to upload data | [*SecretKeySelector](#SecretKeySelector)
+`storageKey ` | The storage account key to be used in conjunction with the storage account name | [*SecretKeySelector](#SecretKeySelector)
+`storageSasToken ` | A shared-access-signature to be used in conjunction with the storage account name | [*SecretKeySelector](#SecretKeySelector)
+
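+A minimal sketch of how these fields are referenced from a cluster
+specification, assuming a secret named `azure-creds` that holds a
+connection string:
+
+```yaml
+azureCredentials:
+  connectionString:
+    name: azure-creds
+    key: AZURE_STORAGE_CONNECTION_STRING
+```
+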
## Backup
@@ -126,24 +142,25 @@ Name | Description | Type
BackupStatus defines the observed state of Backup
-Name | Description | Type
---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------
-`s3Credentials ` | The credentials to use to upload data to S3 - *mandatory* | [S3Credentials](#S3Credentials)
-`endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string
-`destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string
-`serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string
-`encryption ` | Encryption method required to S3 API | string
-`backupId ` | The ID of the Barman backup | string
-`phase ` | The last backup status | BackupPhase
-`startedAt ` | When the backup was started | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
-`stoppedAt ` | When the backup was terminated | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
-`beginWal ` | The starting WAL | string
-`endWal ` | The ending WAL | string
-`beginLSN ` | The starting xlog | string
-`endLSN ` | The ending xlog | string
-`error ` | The detected error | string
-`commandOutput ` | Unused. Retained for compatibility with old versions. | string
-`commandError ` | The backup command output in case of error | string
+Name | Description | Type
+---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------
+`s3Credentials ` | The credentials to be used to upload data to S3 | [*S3Credentials](#S3Credentials)
+`azureCredentials` | The credentials to be used to upload data to Azure Blob Storage | [*AzureCredentials](#AzureCredentials)
+`endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string
+`destinationPath ` | The path where to store the backup (e.g. s3://bucket/path/to/folder); this path, with different destination folders, will be used for WALs and for data - *mandatory* | string
+`serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string
+`encryption       ` | Encryption method required by the S3 API | string
+`backupId ` | The ID of the Barman backup | string
+`phase ` | The last backup status | BackupPhase
+`startedAt ` | When the backup was started | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
+`stoppedAt ` | When the backup was terminated | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
+`beginWal ` | The starting WAL | string
+`endWal ` | The ending WAL | string
+`beginLSN ` | The starting xlog | string
+`endLSN ` | The ending xlog | string
+`error ` | The detected error | string
+`commandOutput ` | Unused. Retained for compatibility with old versions. | string
+`commandError ` | The backup command output in case of error | string
@@ -151,15 +168,16 @@ Name | Description
BarmanObjectStoreConfiguration contains the backup configuration using Barman against an S3-compatible object storage
-Name | Description | Type
---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------
-`s3Credentials ` | The credentials to use to upload data to S3 - *mandatory* | [S3Credentials](#S3Credentials)
-`endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string
-`endpointCA ` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive | [*SecretKeySelector](#SecretKeySelector)
-`destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string
-`serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string
-`wal ` | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*WalBackupConfiguration](#WalBackupConfiguration)
-`data ` | The configuration to be used to backup the data files When not defined, base backups files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*DataBackupConfiguration](#DataBackupConfiguration)
+Name | Description | Type
+---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------
+`s3Credentials ` | The credentials to use to upload data to S3 | [*S3Credentials](#S3Credentials)
+`azureCredentials` | The credentials to use to upload data to Azure Blob Storage | [*AzureCredentials](#AzureCredentials)
+`endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string
+`endpointCA ` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive | [*SecretKeySelector](#SecretKeySelector)
+`destinationPath ` | The path where to store the backup (e.g. s3://bucket/path/to/folder); this path, with different destination folders, will be used for WALs and for data - *mandatory* | string
+`serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string
+`wal ` | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*WalBackupConfiguration](#WalBackupConfiguration)
+`data            ` | The configuration to be used to back up the data files. When not defined, base backup files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*DataBackupConfiguration](#DataBackupConfiguration)
@@ -204,10 +222,11 @@ Name | Description | Typ
BootstrapRecovery contains the configuration required to restore the backup with the specified name and, after having changed the password with the one chosen for the superuser, will use it to bootstrap a full cluster cloning all the instances from the restored primary. Refer to the Bootstrap page of the documentation for more information.
-Name | Description | Type
--------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------
-`backup ` | The backup we need to restore - *mandatory* | [LocalObjectReference](#LocalObjectReference)
-`recoveryTarget` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET | [*RecoveryTarget](#RecoveryTarget)
+Name | Description | Type
+-------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------
+`backup ` | The backup we need to restore | [*LocalObjectReference](#LocalObjectReference)
+`source ` | The external cluster whose backup we will restore. This is also used as the name of the folder under which the backup is stored, so it must be set to the name of the source cluster | string
+`recoveryTarget` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET | [*RecoveryTarget](#RecoveryTarget)
@@ -262,32 +281,36 @@ Name | Description
ClusterSpec defines the desired state of Cluster
-Name | Description | Type
---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------
-`description ` | Description of this PostgreSQL cluster | string
-`imageName ` | Name of the container image, supporting both tags (`:`) and digests for deterministic and repeatable deployments (`:@sha256:`) | string
-`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64
-`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64
-`instances ` | Number of instances required in the cluster - *mandatory* | int32
-`minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32
-`maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32
-`postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration)
-`bootstrap ` | Instructions to bootstrap this cluster | [*BootstrapConfiguration](#BootstrapConfiguration)
-`replica ` | Replica cluster configuration | [*ReplicaClusterConfiguration](#ReplicaClusterConfiguration)
-`superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [*LocalObjectReference](#LocalObjectReference)
-`certificates ` | The configuration for the CA and related certificates | [*CertificatesConfiguration](#CertificatesConfiguration)
-`imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [[]LocalObjectReference](#LocalObjectReference)
-`storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration)
-`startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32
-`stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32
-`affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration)
-`resources ` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#resourcerequirements-v1-core)
-`primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy
-`backup ` | The configuration to be used for backups | [*BackupConfiguration](#BackupConfiguration)
-`nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [*NodeMaintenanceWindow](#NodeMaintenanceWindow)
-`licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string
-`monitoring ` | The configuration of the monitoring infrastructure of this cluster | [*MonitoringConfiguration](#MonitoringConfiguration)
-`externalClusters ` | The list of external clusters which are used in the configuration | [[]ExternalCluster](#ExternalCluster)
+Name | Description | Type
+--------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------
+`description ` | Description of this PostgreSQL cluster | string
+`imageName             ` | Name of the container image, supporting both tags (`<image>:<tag>`) and digests for deterministic and repeatable deployments (`<image>:<tag>@sha256:<digestValue>`) | string
+`imagePullPolicy ` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | corev1.PullPolicy
+`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64
+`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64
+`instances ` | Number of instances required in the cluster - *mandatory* | int32
+`minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32
+`maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32
+`postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration)
+`bootstrap ` | Instructions to bootstrap this cluster | [*BootstrapConfiguration](#BootstrapConfiguration)
+`replica ` | Replica cluster configuration | [*ReplicaClusterConfiguration](#ReplicaClusterConfiguration)
+`superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [*LocalObjectReference](#LocalObjectReference)
+`enableSuperuserAccess` | When this option is enabled, the operator will use the `SuperuserSecret` to update the `postgres` user password (if the secret is not present, the operator will automatically create one). When this option is disabled, the operator will ignore the `SuperuserSecret` content, delete it when automatically created, and then blank the password of the `postgres` user by setting it to `NULL`. Enabled by default. | *bool
+`certificates ` | The configuration for the CA and related certificates | [*CertificatesConfiguration](#CertificatesConfiguration)
+`imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [[]LocalObjectReference](#LocalObjectReference)
+`storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration)
+`startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32
+`stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32
+`affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration)
+`resources ` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#resourcerequirements-v1-core)
+`primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy
+`backup ` | The configuration to be used for backups | [*BackupConfiguration](#BackupConfiguration)
+`nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [*NodeMaintenanceWindow](#NodeMaintenanceWindow)
+`licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string
+`licenseKeySecret      ` | The reference to the license key. When this is set, it takes precedence over LicenseKey. | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
+`monitoring ` | The configuration of the monitoring infrastructure of this cluster | [*MonitoringConfiguration](#MonitoringConfiguration)
+`externalClusters ` | The list of external clusters which are used in the configuration | [[]ExternalCluster](#ExternalCluster)
+`logLevel ` | The instances' log level, one of the following values: error, info (default), debug, trace | string
@@ -316,6 +339,7 @@ Name | Description
`secretsResourceVersion ` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion)
`configMapResourceVersion` | The list of resource versions of the configmaps, managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the configmap data | [ConfigMapResourceVersion](#ConfigMapResourceVersion)
`certificates ` | The configuration for the CA and related certificates, initialized with defaults. | [CertificatesStatus](#CertificatesStatus)
+`firstRecoverabilityPoint` | The first recoverability point, stored as a date in RFC3339 format | string
@@ -364,7 +388,7 @@ Name | Description | Type
## ExternalCluster
-ExternalCluster represents the connection parameters of an external server which is used in the cluster configuration
+ExternalCluster represents the connection parameters to an external cluster which is used in the other sections of the configuration
Name | Description | Type
-------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------
@@ -374,6 +398,7 @@ Name | Description
`sslKey ` | The reference to an SSL private key to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
`sslRootCert ` | The reference to an SSL CA public key to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
`password ` | The reference to the password to be used to connect to the server | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
+`barmanObjectStore ` | The configuration for the barman-cloud tool suite | [*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration)
@@ -448,7 +473,7 @@ ReplicaClusterConfiguration encapsulates the configuration of a replica cluster
Name | Description | Type
------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------
`enabled` | If replica mode is enabled, this cluster will be a replica of an existing cluster. A cluster of such type can be created only using bootstrap via pg_basebackup - *mandatory* | bool
-`source ` | The name of the external server which is the replication origin - *mandatory* | string
+`source ` | The name of the external cluster which is the replication origin - *mandatory* | string
@@ -501,11 +526,12 @@ Name | Description
ScheduledBackupSpec defines the desired state of ScheduledBackup
-Name | Description | Type
--------- | -------------------------------------------------------------------- | ---------------------------------------------
-`suspend ` | If this backup is suspended of not | *bool
-`schedule` | The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - *mandatory* | string
-`cluster ` | The cluster to backup | [LocalObjectReference](#LocalObjectReference)
+Name | Description | Type
+--------- | --------------------------------------------------------------------- | ---------------------------------------------
+`suspend  ` | Whether this backup is suspended or not | *bool
+`immediate` | Whether the first backup has to start immediately after creation or not | *bool
+`schedule ` | The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - *mandatory* | string
+`cluster  ` | The cluster to back up | [LocalObjectReference](#LocalObjectReference)
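+
+A minimal `ScheduledBackup` sketch using these fields (resource and cluster
+names are assumptions):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ScheduledBackup
+metadata:
+  name: backup-example
+spec:
+  schedule: "0 0 0 * * *"
+  immediate: true
+  cluster:
+    name: cluster-example
+```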
@@ -569,5 +595,4 @@ WalBackupConfiguration is the configuration of the backup of the WAL stream
Name | Description | Type
----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------
`compression` | Compress a WAL file before sending it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType
-`encryption ` | Whenever to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType
-
+`encryption ` | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx
index 1d69d93e385..7834177b43a 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx
@@ -4,11 +4,19 @@ originalFilePath: 'src/architecture.md'
product: 'Cloud Native Operator'
---
-For High Availability goals, the PostgreSQL database management system provides administrators with built-in **physical replication** capabilities based on **Write Ahead Log (WAL) shipping**.
+For High Availability and Scalability goals, the PostgreSQL database management
+system provides administrators with built-in **physical replication**
+capabilities based on **Write Ahead Log (WAL) shipping**.
-PostgreSQL supports both asynchronous and synchronous streaming replication, as well as asynchronous file-based log shipping (normally used as a fallback option, for example, to store WAL files in an object store). Replicas are usually called *standby servers* and can also be used for read-only workloads, thanks to the *Hot Standby* feature.
+PostgreSQL supports both asynchronous and synchronous streaming replication
+over the network, as well as asynchronous file-based log shipping (normally
+used as a fallback option, for example, to store WAL files in an object store).
+Replicas are usually called *standby servers* and can also be used for
+read-only workloads, thanks to the *Hot Standby* feature.
-Cloud Native PostgreSQL currently supports clusters based on asynchronous and synchronous streaming replication to manage multiple hot standby replicas, with the following specifications:
+Cloud Native PostgreSQL supports clusters based on asynchronous and synchronous
+streaming replication to manage multiple hot standby replicas within the same
+Kubernetes cluster, with the following specifications:
* One primary, with optional multiple hot standby replicas for High Availability
* Available services for applications:
@@ -16,32 +24,38 @@ Cloud Native PostgreSQL currently supports clusters based on asynchronous and sy
* `-ro`: applications connect to the only hot standby replicas for read-only-workloads
* `-r`: applications connect to any of the instances for read-only workloads
* Shared-nothing architecture recommended for better resilience of the PostgreSQL cluster:
- * PostgreSQL instances should reside on different Kubernetes worker nodes and share only the network
- * PostgreSQL instances can reside in different availability zones in the same region
+ * PostgreSQL instances should reside on different Kubernetes worker nodes
+ and share only the network
+ * PostgreSQL instances can reside in different
+ availability zones in the same region
* All nodes of a PostgreSQL cluster should reside in the same region
!!! Seealso "Replication"
Please refer to the ["Replication" section](replication.md) for more
- information about how Cloud Native PostgreSQL relies on PostgreSQL replication.
+ information about how Cloud Native PostgreSQL relies on PostgreSQL replication,
+ including synchronous settings.
## Read-write workloads
-Applications can decide to connect to the PostgreSQL instance elected as *current primary*
-by the Kubernetes operator, as depicted in the following diagram:
+Applications can decide to connect to the PostgreSQL instance elected as
+*current primary* by the Kubernetes operator, as depicted in the following
+diagram:
![Applications writing to the single primary](./images/architecture-rw.png)
Applications can use the `-rw` suffix service.
In case of temporary or permanent unavailability of the primary, Kubernetes
-will move the `-rw` to another instance of the cluster for high availability
+will move the `-rw` service to another instance of the cluster for high availability
purposes.
## Read-only workloads
!!! Important
- Applications must be aware of the limitations that [Hot Standby](https://www.postgresql.org/docs/current/hot-standby.html)
- presents and familiar with the way PostgreSQL operates when dealing with these workloads.
+ Applications must be aware of the limitations that
+ [Hot Standby](https://www.postgresql.org/docs/current/hot-standby.html)
+ presents and familiar with the way PostgreSQL operates when dealing with
+ these workloads.
Applications can access hot standby replicas through the `-ro` service made available
by the operator. This service enables the application to offload read-only queries from the
@@ -51,7 +65,8 @@ The following diagram shows the architecture:
![Applications reading from hot standby replicas in round robin](./images/architecture-read-only.png)
-Applications can also access any PostgreSQL instance at any time through the `-r` service at connection time.
+Applications can also access any PostgreSQL instance through the
+`-r` service.
## Application deployments
@@ -67,8 +82,8 @@ implement a form of Virtual IP as described in the
["Service" page of the Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies).
!!! Hint
- It is highly recommended to use those services in your applications,
- and avoid connecting directly to a specific PostgreSQL instance, as the latter
+    It is highly recommended that you use those services in your applications,
+    and avoid connecting directly to a specific PostgreSQL instance, as the latter
can change during the cluster lifetime.
You can use these services in your applications through:
@@ -76,7 +91,7 @@ You can use these services in your applications through:
* DNS resolution
* environment variables
-As far as the credentials to connect to PostgreSQL are concerned, you can
+For the credentials to connect to PostgreSQL, you can
use the secrets generated by the operator.
!!! Warning
@@ -84,18 +99,18 @@ use the secrets generated by the operator.
service is used internally to manage PostgreSQL instance discovery.
It's not supposed to be used directly by applications.
-## DNS resolution
+### DNS resolution
-You can use the Kubernetes DNS service, which is required by this operator,
-to point to a given server.
-You can do that by just using the name of the service if the application is
+You can use the Kubernetes DNS service to point to a given server.
+The Kubernetes DNS service is required by the operator.
+You can do that by using the name of the service if the application is
deployed in the same namespace as the PostgreSQL cluster.
In case the PostgreSQL cluster resides in a different namespace, you can use the
full qualifier: `service-name.namespace-name`.
DNS is the preferred and recommended discovery method.
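+
+For example, an application could connect to the primary of a (hypothetical)
+cluster named `cluster-example` in the `default` namespace with:
+
+```
+psql "host=cluster-example-rw.default dbname=app user=app"
+```
+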
-## Environment variables
+### Environment variables
If you deploy your application in the same namespace that contains the
PostgreSQL cluster, you can also use environment variables to connect to the database.
@@ -112,10 +127,10 @@ you can use the following environment variables in your applications:
* `PG_DATABASE_RW_SERVICE_HOST`: the IP address of the
service pointing to the *primary* instance of the cluster
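+
+For example, a client in the same namespace could reach the primary through
+that variable (a sketch, assuming the cluster is named `pg-database`, as the
+variable prefix suggests):
+
+```
+psql "host=$PG_DATABASE_RW_SERVICE_HOST dbname=app user=app"
+```
+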
-## Secrets
+### Secrets
-The PostgreSQL operator will generate two secrets for every PostgreSQL cluster
-it deploys:
+The PostgreSQL operator will generate two `basic-auth` type secrets for every
+PostgreSQL cluster it deploys:
* `[cluster name]-superuser`
* `[cluster name]-app`
@@ -129,3 +144,80 @@ connecting to the PostgreSQL cluster.
The `-superuser` ones are supposed to be used only for administrative purposes.
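+
+For example, you can read the application user's password from the generated
+secret (the cluster name is an assumption):
+
+```
+kubectl get secret cluster-example-app \
+  -o jsonpath='{.data.password}' | base64 -d
+```
+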
+## Multi-cluster deployments
+
+!!! Info
+ Cloud Native PostgreSQL supports deploying PostgreSQL across multiple
+ Kubernetes clusters through a feature called **Replica Cluster**,
+ which is described in this section.
+
+In a distributed PostgreSQL cluster there can only be a single PostgreSQL
+instance acting as a primary at all times. This means that applications can
+only write inside a single Kubernetes cluster, at any time.
+
+!!! Tip
+ If you are interested in a PostgreSQL architecture where all instances accept writes,
+ please take a look at [BDR (Bi-Directional Replication) by EDB](https://www.enterprisedb.com/docs/bdr/latest/).
+ For Kubernetes, BDR will have its own Operator, expected late in 2021.
+
+However, for business continuity objectives it is fundamental to:
+
+- reduce global **recovery point objectives** (RPO) by storing PostgreSQL backup data
+ in multiple locations, regions and possibly using different providers
+ (**Disaster Recovery**)
+- reduce global **recovery time objectives** (RTO) by taking advantage of PostgreSQL
+ replication beyond the primary Kubernetes cluster (**High Availability**)
+
+In order to address the above concerns, Cloud Native PostgreSQL introduces the
+concept of a *PostgreSQL Replica Cluster*. Replica clusters are the Cloud
+Native PostgreSQL way to enable multi-cluster deployments in private, public,
+hybrid, and multi-cloud contexts.
+
+A replica cluster is a separate `Cluster` resource:
+
+1. having either `pg_basebackup` or full `recovery` as the `bootstrap`
+ option from a defined external source cluster
+2. having the `replica.enabled` option set to `true`
+3. replicating from a defined external cluster identified by `replica.source`,
+ normally located outside the Kubernetes cluster
+4. replaying WAL information received from the recovery object store
+ (using PostgreSQL's `restore_command` parameter), or via streaming
+ replication (using PostgreSQL's `primary_conninfo` parameter), or any of
+ the two (in case both the `barmanObjectStore` and `connectionParameters`
+ are defined in the external cluster)
+5. accepting only read connections, as supported by PostgreSQL's Hot Standby
+
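+A minimal sketch that puts these requirements together (cluster names, the
+bucket, and the credentials are assumptions):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-replica
+spec:
+  instances: 3
+  bootstrap:
+    recovery:
+      source: cluster-origin
+  replica:
+    enabled: true
+    source: cluster-origin
+  externalClusters:
+    - name: cluster-origin
+      barmanObjectStore:
+        destinationPath: s3://BUCKET_NAME/
+        s3Credentials:
+          accessKeyId:
+            name: aws-creds
+            key: ACCESS_KEY_ID
+          secretAccessKey:
+            name: aws-creds
+            key: ACCESS_SECRET_KEY
+  storage:
+    size: 1Gi
+```
+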
+!!! Seealso
+ Please refer to the ["Bootstrap" section](bootstrap.md) for more information
+ about cloning a PostgreSQL cluster from another one (defined in the
+ `externalClusters` section).
+
+The diagram below depicts a PostgreSQL cluster spanning over two different
+Kubernetes clusters, where the primary cluster is in the first Kubernetes
+cluster and the replica cluster is in the second. The second Kubernetes cluster
+acts as the company's disaster recovery cluster, ready to be activated in case
+of disaster and unavailability of the first one.
+
+![An example of multi-cluster deployment with a primary and a replica cluster](./images/multi-cluster.png)
+
+A replica cluster can have the same architecture as the primary cluster. In
+place of the primary instance, a replica cluster has a **designated primary**
+instance, which is a standby server with an arbitrary number of cascading
+standby servers in streaming replication (symmetric architecture).
+
+The designated primary can be promoted at any time, making the replica cluster
+a primary cluster capable of accepting write connections.
+
+!!! Warning
+ Cloud Native PostgreSQL does not perform any cross-cluster switchover
+    or failover at the moment. Such an operation must be performed manually
+ or delegated to a multi-cluster/federated cluster aware authority.
+ Each PostgreSQL cluster is independent from any other.
+
+The designated primary in the above example is fed via WAL streaming
+(`primary_conninfo`), with a fallback option for file-based WAL shipping through
+the `restore_command` and `barman-cloud-wal-restore`.
+
+Cloud Native PostgreSQL allows you to define multiple replica clusters.
+You can also define replica clusters with a lower number of replicas, and then
+increase this number when the cluster is promoted to primary.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
index e6152653e81..3e1822a77bc 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
@@ -23,11 +23,23 @@ as it is composed of a community PostgreSQL image and the latest
Barman to Barman Cloud in the future. For the time being, it is your responsibility
to configure retention policies directly on the object store.
-## Cloud credentials
+## Cloud provider support
-You can archive the backup files in any service whose API is compatible
-with AWS S3. You will need the following information about your
-environment:
+You can archive the backup files in any service that is supported
+by the Barman Cloud infrastructure. That is:
+
+- [AWS S3](https://aws.amazon.com/s3/)
+- [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/).
+
+You can also use any compatible implementation of the
+supported services.
+
+The required setup depends on the chosen storage provider and is
+discussed in the following sections.
+
+### S3
+
+You will need the following information about your environment:
- `ACCESS_KEY_ID`: the ID of the access key that will be used
to upload files in S3
@@ -50,10 +62,6 @@ kubectl create secret generic aws-creds \
The credentials will be stored inside Kubernetes and will be encrypted
if encryption at rest is configured in your installation.
-## Configuring the Cluster
-
-### S3
-
Given that secret, you can configure your cluster like in
the following example:
@@ -113,8 +121,8 @@ spec:
### MinIO Gateway
Optionally, you can use MinIO Gateway as a common interface which
-relays backup objects to other cloud storage solutions, like S3, GCS or
-Azure. For more information, please refer to [MinIO official documentation](https://docs.min.io/).
+relays backup objects to other cloud storage solutions, like S3 or GCS.
+For more information, please refer to [MinIO official documentation](https://docs.min.io/).
Specifically, the Cloud Native PostgreSQL cluster can directly point to a local
MinIO Gateway as an endpoint, using previously created credentials and service.
@@ -231,6 +239,79 @@ spec:
Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before
proceeding with a backup.
+### Azure Blob Storage
+
+In order to access your storage account, you will need one of the following combinations
+of credentials:
+
+- [**Connection String**](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#configure-a-connection-string-for-an-azure-storage-account)
+- **Storage account name** and [**Storage account access key**](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
+- **Storage account name** and [**Storage account SAS Token**](https://docs.microsoft.com/en-us/azure/storage/blobs/sas-service-create).
+
+The credentials need to be stored inside a Kubernetes Secret, adding data entries only when
+needed. The following command shows how to do that:
+
+```
+kubectl create secret generic azure-creds \
+  --from-literal=AZURE_STORAGE_ACCOUNT=<storage account name> \
+  --from-literal=AZURE_STORAGE_KEY=<storage account key> \
+  --from-literal=AZURE_STORAGE_SAS_TOKEN=<SAS token> \
+  --from-literal=AZURE_STORAGE_CONNECTION_STRING=<connection string>
+```
+
+The credentials will be encrypted at rest, if this feature is enabled in the
+Kubernetes cluster in use.
+
+Given the previous secret, the provided credentials can be injected inside the cluster
+configuration:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: ""
+ azureCredentials:
+        connectionString:
+          name: azure-creds
+          key: AZURE_STORAGE_CONNECTION_STRING
+        storageAccount:
+          name: azure-creds
+          key: AZURE_STORAGE_ACCOUNT
+        storageKey:
+          name: azure-creds
+          key: AZURE_STORAGE_KEY
+        storageSasToken:
+          name: azure-creds
+          key: AZURE_STORAGE_SAS_TOKEN
+```
+
+When using the Azure Blob Storage, the `destinationPath` fulfills the following
+structure:
+
+```
+<http|https>://<account-name>.<service-name>.core.windows.net/<resource-path>
+```
+
+where `<resource-path>` is `<container>/<blob>`. The **account name**,
+which is also called **storage account name**, is included in the used host name.
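+
+For example, assuming a (hypothetical) storage account named `mystorageaccount`
+and a container named `backups`:
+
+```
+https://mystorageaccount.blob.core.windows.net/backups/
+```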
+
+### Other Azure Blob Storage compatible providers
+
+If you are using a different implementation of the Azure Blob Storage APIs,
+the `destinationPath` will have the following structure:
+
+```
+<http|https>://<host-and-port>/<account-name>/<resource-path>
+```
+
+In that case, `<account-name>` is the first component of the path.
+
+This is required if you are testing the Azure support via the Azure Storage
+Emulator or [Azurite](https://github.com/Azure/Azurite).
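+
+For example, a (hypothetical) local Azurite instance listening on its default
+blob port, with its default `devstoreaccount1` account, could be addressed as:
+
+```
+http://localhost:10000/devstoreaccount1/backups
+```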
+
## On-demand backups
To request a new backup, you need to create a new Backup resource
@@ -334,6 +415,12 @@ spec:
The proposed specification will schedule a backup every day at midnight.
+ScheduledBackups can be suspended if needed by setting `.spec.suspend: true`;
+this will prevent any new backup from being scheduled as long as the option is set to true.
+
+In case you want to issue a backup as soon as the ScheduledBackup resource is created,
+you can set `.spec.immediate: true`.
+
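+For example, a ScheduledBackup can be suspended on the fly (the resource name
+is an assumption):
+
+```
+kubectl patch scheduledbackup backup-example \
+  --type merge -p '{"spec":{"suspend":true}}'
+```
+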
## WAL archiving
WAL archiving is enabled as soon as you choose a destination path
@@ -436,4 +523,4 @@ manager running in the Pods.
You can optionally specify a `recoveryTarget` to perform a point in time
recovery. If left unspecified, the recovery will continue up to the latest
available WAL on the default target timeline (`current` for PostgreSQL up to
-11, `latest` for version 12 and above).
+11, `latest` for version 12 and above).
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/before_you_start.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/before_you_start.mdx
index 6b9d5f3a4f9..c7717f918d3 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/before_you_start.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/before_you_start.mdx
@@ -35,6 +35,9 @@ Cloud Native PostgreSQL requires Kubernetes 1.16 or higher.
| Replica | A PostgreSQL instance replicating from the only primary instance in a cluster and is kept updated by reading a stream of Write-Ahead Log (WAL) records. A replica is also known as *standby* or *secondary* server. PostgreSQL relies on physical streaming replication (async/sync) and file-based log shipping (async). |
| Hot Standby | PostgreSQL feature that allows a *replica* to accept read-only workloads. |
| Cluster | To be intended as High Availability (HA) Cluster: a set of PostgreSQL instances made up by a single primary and an optional arbitrary number of replicas. |
+| Replica Cluster | A Cloud Native PostgreSQL `Cluster` that is in continuous recovery mode from a selected PostgreSQL cluster, normally residing outside the Kubernetes cluster. It is a feature that enables multi-cluster deployments in private, public, hybrid, and multi-cloud contexts. |
+| Designated Primary | A PostgreSQL standby instance in a replica cluster that is in continuous recovery from another PostgreSQL cluster and that is designated to become primary in case the replica cluster becomes primary. |
+| Superuser | In PostgreSQL a *superuser* is any role with both `LOGIN` and `SUPERUSER` privileges. For security reasons, Cloud Native PostgreSQL performs administrative tasks by connecting to the `postgres` database as the `postgres` user via `peer` authentication over the local Unix Domain Socket. |
## Cloud terminology
@@ -46,4 +49,4 @@ Cloud Native PostgreSQL requires Kubernetes 1.16 or higher.
## What to do next
Now that you have familiarized with the terminology, you can decide to
-[test Cloud Native PostgreSQL on your laptop using a local cluster](quickstart.md) before deploying the operator in your selected cloud environment.
+[test Cloud Native PostgreSQL on your laptop using a local cluster](quickstart.md) before deploying the operator in your selected cloud environment.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx
index 69a8ca66262..0914469f5ef 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx
@@ -11,43 +11,90 @@ product: 'Cloud Native Operator'
This section describes the options you have to create a new
PostgreSQL cluster and the design rationale behind them.
+There are primarily two ways to bootstrap a new cluster:
-When a PostgreSQL cluster is defined, you can configure the
-*bootstrap* method using the `bootstrap` section of the cluster
-specification.
+- from scratch (`initdb`)
+- from an existing PostgreSQL cluster, either directly (`pg_basebackup`)
+ or indirectly (`recovery`)
-In the following example:
+!!! Important
+ Bootstrapping from an existing cluster opens up the possibility
+ to create a **replica cluster**, that is an independent PostgreSQL
+ cluster which is in continuous recovery, synchronized with the source
+ and that accepts read-only connections.
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
- name: cluster-example-initdb
-spec:
- instances: 3
+!!! Warning
+ Cloud Native PostgreSQL requires both the `postgres` user and database to
+    always exist. Using the local Unix Domain Socket, it needs to connect
+    as the `postgres` user to the `postgres` database via `peer` authentication in
+ order to perform administrative tasks on the cluster.
+ **DO NOT DELETE** the `postgres` user or the `postgres` database!!!
- bootstrap:
- initdb:
- database: appdb
- owner: appuser
+## The `bootstrap` section
- storage:
- size: 1Gi
-```
+The *bootstrap* method can be defined in the `bootstrap` section of the cluster
+specification.
+Cloud Native PostgreSQL currently supports the following bootstrap methods:
+
+- `initdb`: initialize an empty PostgreSQL cluster (default)
+- `recovery`: create a PostgreSQL cluster by restoring from an existing cluster
+ via a backup object store, and replaying all the available WAL files or up to
+ a given *point in time*
+- `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of
+ the same major version using `pg_basebackup` via streaming replication protocol -
+ useful if you want to migrate databases to Cloud Native PostgreSQL, even
+ from outside Kubernetes.
+
+Unlike the `initdb` method, both `recovery` and `pg_basebackup`
+create a new cluster based on another one (either offline or online) and can be
+used to spin up replica clusters. They both rely on the definition of external
+clusters.
+
+!!! Seealso "API reference"
+    Please refer to the ["API reference for the `bootstrap` section"](api_reference.md#BootstrapConfiguration)
+ for more information.
+
+## The `externalClusters` section
+
+The `externalClusters` section allows you to define one or more PostgreSQL
+clusters that are somehow related to the current one. While in the future
+this section will enable more complex scenarios, it is currently intended
+to define a cross-region PostgreSQL cluster based on physical replication,
+and spanning over different Kubernetes clusters or even traditional VM/bare-metal
+environments.
+
+As far as bootstrapping is concerned, `externalClusters` can be used
+to define the source PostgreSQL cluster for either the `pg_basebackup`
+method or the `recovery` one. An external cluster needs to have:
+
+- a name that identifies the origin cluster, to be used as a reference via the
+ `source` option
+- at least one of the following:
+
+ - information about streaming connection
+ - information about the **recovery object store**, which is a Barman Cloud
+ compatible object store that contains the backup files of the source
+ cluster - that is, base backups and WAL archives.
-The `initdb` bootstrap method is used.
+!!! Note
+ A recovery object store is normally an AWS S3 or an Azure Blob Storage
+ compatible source that is managed by Barman Cloud.
+
+When only the streaming connection is defined, the source can be used for the
+`pg_basebackup` method. When only the recovery object store is defined, the
+source can be used for the `recovery` method. When both are defined, any of the
+two bootstrap methods can be chosen.
-We currently support the following bootstrap methods:
+Furthermore, in case of `pg_basebackup` or full `recovery` (i.e., not a point in time), the
+cluster is eligible for replica cluster mode. This means that the cluster is
+continuously fed from the source, either via streaming, via WAL shipping
+through PostgreSQL's `restore_command`, or any of the two.
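+
+A sketch of an external cluster that defines both sources (all names, the
+container, and the credentials are assumptions):
+
+```yaml
+externalClusters:
+  - name: cluster-origin
+    connectionParameters:
+      host: cluster-origin-rw.default.svc
+      user: postgres
+      dbname: postgres
+    password:
+      name: cluster-origin-superuser
+      key: password
+    barmanObjectStore:
+      destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+      azureCredentials:
+        storageAccount:
+          name: azure-creds
+          key: AZURE_STORAGE_ACCOUNT
+        storageKey:
+          name: azure-creds
+          key: AZURE_STORAGE_KEY
+```
+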
-- `initdb`: initialize an empty PostgreSQL cluster
-- `recovery`: create a PostgreSQL cluster by restoring from an existing backup
- and replaying all the available WAL files or up to a given point in time
-- `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of the
- same major version using `pg_basebackup` via streaming replication protocol -
- useful if you want to migrate databases to Cloud Native PostgreSQL, even
- from outside Kubernetes.
+!!! Seealso "API reference"
+    Please refer to the ["API reference for the `externalClusters` section"](api_reference.md#ExternalCluster)
+ for more information.
-## initdb
+## Bootstrap an empty cluster (`initdb`)
The `initdb` bootstrap method is used to create a new PostgreSQL cluster from
scratch. It is the default one unless specified differently.
@@ -67,10 +114,10 @@ spec:
bootstrap:
initdb:
- database: appdb
- owner: appuser
+ database: app
+ owner: app
secret:
- name: appuser-secret
+ name: app-secret
storage:
size: 1Gi
@@ -79,10 +126,11 @@ spec:
The above example of bootstrap will:
1. create a new `PGDATA` folder using PostgreSQL's native `initdb` command
-2. set a *superuser* password from the secret named `superuser-secret`
-3. create an *unprivileged* user named `appuser`
-4. set the password of the latter using the one in the `appuser-secret` secret
-5. create a database called `appdb` owned by the `appuser` user.
+2. set a password for the `postgres` *superuser* from the secret named `superuser-secret`
+3. create an *unprivileged* user named `app`
+4. set the password of the latter (`app`) using the one in the `app-secret`
+   secret (make sure that `username` matches the value of `owner`)
+5. create a database called `app` owned by the `app` user.
Thanks to the *convention over configuration paradigm*, you can let the
operator choose a default database name (`app`) and a default application
@@ -95,19 +143,19 @@ and use them in the PostgreSQL cluster - as described in the above example.
The supplied secrets must comply with the specifications of the
[`kubernetes.io/basic-auth` type](https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret).
-The operator will only use the `password` field of the secret,
-ignoring the `username` one. If you plan to reuse the secret for application
-connections, you can set the `username` field to the same value as the `owner`.
+As a result, the `username` in the secret must match that of the `owner`
+(for the application secret) and `postgres` for the superuser one.
The following is an example of a `basic-auth` secret:
```yaml
apiVersion: v1
data:
+ username: YXBw
password: cGFzc3dvcmQ=
kind: Secret
metadata:
- name: cluster-example-app-user
+ name: app-secret
type: kubernetes.io/basic-auth
```
@@ -119,8 +167,8 @@ the application database.
Future implementations of the operator might allow you to create
additional users in a declarative configuration fashion.
-The superuser and the `postgres` database are supposed to be used only
-by the operator to configure the cluster.
+The `postgres` superuser and the `postgres` database are supposed to be used
+only by the operator to configure the cluster.
In case you don't supply any database name, the operator will proceed
by convention and create the `app` database, and adds it to the cluster
@@ -150,8 +198,8 @@ spec:
bootstrap:
initdb:
- database: appdb
- owner: appuser
+ database: app
+ owner: app
options:
- "-k"
- "--locale=en_US"
@@ -161,7 +209,8 @@ spec:
The user can also specify a custom list of queries that will be executed
once, just after the database is created and configured. These queries will
-be executed as the *superuser*, connected to the `postgres` database:
+be executed as the *superuser* (`postgres`), connected to the `postgres`
+database:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -173,8 +222,8 @@ spec:
bootstrap:
initdb:
- database: appdb
- owner: appuser
+ database: app
+ owner: app
options:
- "-k"
- "--locale=en_US"
@@ -212,8 +261,8 @@ spec:
bootstrap:
initdb:
- database: appdb
- owner: appuser
+ database: app
+ owner: app
redwood: false
storage:
size: 1Gi
@@ -222,14 +271,96 @@ spec:
!!! Important
EDB Postgres Advanced requires a valid license key (trial or production) to start.
-## recovery
+## Bootstrap from another cluster
+
+Cloud Native PostgreSQL enables the bootstrap of a cluster starting from
+another one of the same major version.
+This operation can happen by connecting directly to the source cluster via
+streaming replication (`pg_basebackup`), or indirectly via a *recovery object
+store* (`recovery`).
+
+The source cluster must be defined in the `externalClusters` section, identified
+by `name` (our recommendation is to use the same `name` as the origin cluster).
+
+!!! Important
+ By default the `recovery` method strictly uses the `name` of the
+ cluster in the `externalClusters` section to locate the main folder
+ of the backup data within the object store, which is normally reserved
+ for the name of the server. You can specify a different one with the
+    `barmanObjectStore.serverName` property (by default assigned to the
+ value of `name` in the external cluster definition).
+
+### Bootstrap from a backup (`recovery`)
The `recovery` bootstrap mode lets you create a new cluster from
-an existing backup. You can find more information about the recovery
-feature in the ["Backup and recovery" page](backup_recovery.md).
+an existing backup, namely a *recovery object store*.
-The following example contains the full structure of the `recovery`
-section:
+There are two ways to achieve this result in Cloud Native PostgreSQL:
+
+- using a recovery object store, that is a backup of another cluster
+ created by Barman Cloud and defined via the `barmanObjectStore` option
+ in the `externalClusters` section
+- using an existing `Backup` object in the same namespace (this was the
+ only option available before version 1.8.0).
+
+Both recovery methods enable either full recovery (up to the last
+available WAL) or up to a [point in time](#point-in-time-recovery).
+When performing a full recovery, the cluster can also be started
+in replica mode.
+
+!!! Note
+ You can find more information about backup and recovery of a running cluster
+ in the ["Backup and recovery" page](backup_recovery.md).
+
+#### Recovery from an object store
+
+You can recover from a backup created by Barman Cloud and stored on a supported
+object storage. Once you have defined the external cluster, including all the
+required configuration in the `barmanObjectStore` section, you need to
+reference it in the `.spec.recovery.source` option. The following example
+defines a recovery object store in a blob container in Azure:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore
+spec:
+ [...]
+
+ superuserSecret:
+ name: superuser-secret
+
+ bootstrap:
+ recovery:
+ source: clusterBackup
+
+ externalClusters:
+ - name: clusterBackup
+ barmanObjectStore:
+ destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+ azureCredentials:
+ storageAccount:
+ name: recovery-object-store-secret
+ key: storage_account_name
+ storageKey:
+ name: recovery-object-store-secret
+ key: storage_account_key
+```
+
+#### Recovery from a `Backup` object
+
+If a `Backup` resource is already available in the namespace in which the
+cluster should be created, you can specify its name through
+`.spec.bootstrap.recovery.backup.name`, as in the following example:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -254,32 +385,37 @@ spec:
This bootstrap method allows you to specify just a reference to the
backup that needs to be restored.
-The application database name and the application database user are preserved
+#### Additional considerations
+
+Whether you recover from a recovery object store or an existing `Backup`
+resource, the following considerations apply:
+
+- The application database name and the application database user are preserved
from the backup that is being restored. The operator does not currently attempt
-to backup the underlying secrets, as this is part of the usual maintenance
+to back up the underlying secrets, as this is part of the usual maintenance
activity of the Kubernetes cluster itself.
-
-In case you don't supply any `superuserSecret`, a new one is automatically
+- In case you don't supply any `superuserSecret`, a new one is automatically
generated with a secure and random password. The secret is then used to
reset the password for the `postgres` user of the cluster.
-
-By default, the recovery will continue up to the latest
+- By default, the recovery will continue up to the latest
available WAL on the default target timeline (`current` for PostgreSQL up to
11, `latest` for version 12 and above).
You can optionally specify a `recoveryTarget` to perform a point in time
-recovery (see the ["Point in time recovery" chapter](#point-in-time-recovery)).
+recovery (see the ["Point in time recovery" section](#point-in-time-recovery)).
+
+#### Point in time recovery (PITR)
-### Point in time recovery
+Instead of replaying all the WALs up to the latest one, we can ask PostgreSQL
+to stop replaying WALs at any given point in time, after having extracted a
+base backup. PostgreSQL uses this technique to achieve *point-in-time* recovery
+(PITR).
-Instead of replaying all the WALs up to the latest one,
-we can ask PostgreSQL to stop replaying WALs at any given point in time.
-PostgreSQL uses this technique to implement *point-in-time* recovery.
-This allows you to restore the database to its state at any time after
-the base backup was taken.
+!!! Note
+ PITR is available from recovery object stores as well as `Backup` objects.
The operator will generate the configuration parameters required for this
-feature to work if a recovery target is specified like in the following
-example:
+feature to work if a recovery target is specified, as in the following
+example that uses a recovery object store in Azure:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -294,14 +430,24 @@ spec:
bootstrap:
recovery:
- backup:
- name: backup-example
-
+ source: clusterBackup
recoveryTarget:
targetTime: "2020-11-26 15:22:00.00000+00"
+
+ externalClusters:
+ - name: clusterBackup
+ barmanObjectStore:
+ destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+ azureCredentials:
+ storageAccount:
+ name: recovery-object-store-secret
+ key: storage_account_name
+ storageKey:
+ name: recovery-object-store-secret
+ key: storage_account_key
```
-Beside `targetTime`, you can use the following criteria to stop the recovery:
+Besides `targetTime`, you can use the following criteria to stop the recovery:
- `targetXID` specifies a transaction ID up to which recovery will proceed
@@ -323,7 +469,7 @@ timeline.
By default, the previous parameters are considered to be exclusive, stopping
just before the recovery target. You can request inclusive behavior,
stopping right after the recovery target, by setting the `exclusive` parameter to
-`false` like in the following example:
+`false`, as in the following example relying on a blob container in Azure:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -338,15 +484,25 @@ spec:
bootstrap:
recovery:
- backup:
- name: backup-example
-
+ source: clusterBackup
recoveryTarget:
targetName: "maintenance-activity"
exclusive: false
+
+ externalClusters:
+ - name: clusterBackup
+ barmanObjectStore:
+ destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+ azureCredentials:
+ storageAccount:
+ name: recovery-object-store-secret
+ key: storage_account_name
+ storageKey:
+ name: recovery-object-store-secret
+ key: storage_account_key
```
-## pg_basebackup
+### Bootstrap from a live cluster (`pg_basebackup`)
The `pg_basebackup` bootstrap mode lets you create a new cluster (*target*) as
an exact physical copy of an existing and **binary compatible** PostgreSQL
@@ -387,7 +543,7 @@ PostgreSQL server in general, and might be the easiest way if the source
instance is on a traditional environment outside Kubernetes.
Both cases are explained below.
-### Requirements
+#### Requirements
The following requirements apply to the `pg_basebackup` bootstrap method:
@@ -414,7 +570,7 @@ The following requirements apply to the `pg_basebackup` bootstrap method:
["High Availability, Load Balancing, and Replication" chapter](https://www.postgresql.org/docs/current/high-availability.html)
in the PostgreSQL documentation.
-### About the replication user
+#### About the replication user
As explained in the requirements section, you need to have a user
with either the `SUPERUSER` or, preferably, just the `REPLICATION`
@@ -443,7 +599,7 @@ will need to add it to a secret in the target instance.
for the sake of simplicity. Feel free to change it as you like,
provided you adapt the instructions in the following sections.
-### Username/Password authentication
+#### Username/Password authentication
The first authentication method supported by Cloud Native PostgreSQL
with the `pg_basebackup` bootstrap is based on username and password matching.
@@ -463,7 +619,7 @@ file on the source PostgreSQL instance:
host replication streaming_replica all md5
```
-The following manifest creates a new PostgreSQL 13.3 cluster,
+The following manifest creates a new PostgreSQL 13.4 cluster,
called `target-db`, using the `pg_basebackup` bootstrap method
to clone an external PostgreSQL cluster defined as `source-db`
(in the `externalClusters` array). As you can see, the `source-db`
@@ -478,7 +634,7 @@ metadata:
name: target-db
spec:
instances: 3
- imageName: quay.io/enterprisedb/postgresql:13.3
+ imageName: quay.io/enterprisedb/postgresql:13.4
bootstrap:
pg_basebackup:
@@ -498,9 +654,9 @@ spec:
```
All the requirements must be met for the clone operation to work, including
-the same PostgreSQL version (in our case 13.3).
+the same PostgreSQL version (in our case 13.4).
-### TLS certificate authentication
+#### TLS certificate authentication
The second authentication method supported by Cloud Native PostgreSQL
with the `pg_basebackup` bootstrap is based on TLS client certificates.
@@ -513,7 +669,7 @@ in the same Kubernetes cluster.
This example can be easily adapted to cover an instance that resides
outside the Kubernetes cluster.
-The manifest defines a new PostgreSQL 13.3 cluster called `cluster-clone-tls`,
+The manifest defines a new PostgreSQL 13.4 cluster called `cluster-clone-tls`,
which is bootstrapped using the `pg_basebackup` method from the `cluster-example`
external cluster. The host is identified by the read/write service
in the same cluster, while the `streaming_replica` user is authenticated
@@ -528,7 +684,7 @@ metadata:
name: cluster-clone-tls
spec:
instances: 3
- imageName: quay.io/enterprisedb/postgresql:13.3
+ imageName: quay.io/enterprisedb/postgresql:13.4
bootstrap:
pg_basebackup:
@@ -554,9 +710,9 @@ spec:
key: ca.crt
```
-### Current limitations
+#### Current limitations
-#### Missing tablespace support
+##### Missing tablespace support
Cloud Native PostgreSQL does not currently include full declarative management
of PostgreSQL global objects, namely roles, databases, and tablespaces.
migrate to Cloud Native PostgreSQL a PostgreSQL instance that takes advantage
of tablespaces (you first need to remove them from the source or, if your
organization requires this feature, contact EDB to prioritize it).
-#### Snapshot copy
+##### Snapshot copy
The `pg_basebackup` method takes a snapshot of the source instance in the form of
a PostgreSQL base backup. All transactions written from the start of
@@ -597,4 +753,4 @@ This will open up two main use cases:
- replication over different Kubernetes clusters in Cloud Native PostgreSQL
- *0 cutover time* migrations to Cloud Native PostgreSQL with the `pg_basebackup`
- bootstrap method
+ bootstrap method
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx
index 2c7da4c9678..9a8227b52c3 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx
@@ -62,15 +62,15 @@ generated server TLS secret in addition to the default ones.
#### Client CA Secret
-The same self-signed CA as the Server CA is used by default. The public part
+The same self-signed CA as the Server CA is used by default. The public part
will be passed as `ssl_ca_file` to all the instances in order to be able to verify
client certificates it signed. The private key will be stored in the same secret and
used to sign Client certificates generated by the `kubectl cnp` plugin.
#### Client streaming_replica Certificate
-The operator uses the generated self-signed CA to sign a client certificate for
-the user `streaming_replica`, storing it in a Secret of type `kubernetes.io/tls`.
+The operator uses the generated self-signed CA to sign a client certificate for
+the user `streaming_replica`, storing it in a Secret of type `kubernetes.io/tls`.
This certificate will be passed as `sslcert` and `sslkey` in replicas' connection strings,
to allow securely connecting to the primary instance.
@@ -144,7 +144,7 @@ The new cluster will use the provided server certificates for TLS connections.
#### Cert-manager Example
-Here a simple example about how to use [cert-manager](https://cert-manager.io/) to set up a self-signed CA and generate
+Here is a simple example of how to use [cert-manager](https://cert-manager.io/) to set up a self-signed CA and generate
the needed TLS server certificate:
```yaml
@@ -211,13 +211,13 @@ the [cluster-example-cert-manager.yaml](../samples/cluster-example-cert-manager.
### Client Certificate
If required, you can also provide the two client certificates, generating them
-using a separate component such as [cert-manager](https://cert-manager.io/) or
+using a separate component such as [cert-manager](https://cert-manager.io/) or
[hashicorp vault](https://www.vaultproject.io/docs/secrets/pki). In order to
use a custom CA to verify client certificates for a Cluster, you must specify
the following parameters:
- `replicationTLSSecret`: the name of a Secret of type `kubernetes.io/tls`,
- containing the client certificate for user `streaming_replica`. It must contain
+ containing the client certificate for user `streaming_replica`. It must contain
both the standard `tls.crt` and `tls.key` keys.
- `clientCASecret`: the name of a Secret containing the `ca.crt` key of the CA
  that should be used to verify client certificates.
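+
+If you generate these certificates with your own tooling, here is a sketch of
+how the `replicationTLSSecret` could be created from the resulting files
+(hypothetical file names):
+
+```sh
+kubectl create secret tls my-replication-client-cert \
+  --cert=streaming_replica.crt \
+  --key=streaming_replica.key
+```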
@@ -289,4 +289,4 @@ spec:
```
You can find a complete example using cert-manager to manage both server and client CA and certificates in
-the [cluster-example-cert-manager.yaml](../samples/cluster-example-cert-manager.yaml) deployment manifest.
+the [cluster-example-cert-manager.yaml](../samples/cluster-example-cert-manager.yaml) deployment manifest.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
index b5ad6bc4dd9..227c756ea1b 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
@@ -93,8 +93,8 @@ PostgreSQL HBA Rules
local all all peer
# Require client certificate authentication for the streaming_replica user
-hostssl postgres streaming_replica all cert clientcert=1
-hostssl replication streaming_replica all cert clientcert=1
+hostssl postgres streaming_replica all cert
+hostssl replication streaming_replica all cert
# Otherwise use md5 authentication
host all all all md5
@@ -174,4 +174,4 @@ The following command will reload all configurations for a given cluster:
```shell
kubectl cnp reload [cluster_name]
-```
+```
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/container_images.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/container_images.mdx
index 9bf0e2a7fbd..eea64d2837f 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/container_images.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/container_images.mdx
@@ -21,7 +21,7 @@ with the following requirements:
- `barman-cloud-restore`
- `barman-cloud-backup-list`
- PGAudit extension installed (optional - only if PGAudit is required
- in the actual deployed clusters)
+ in the deployed clusters)
- Sensible locale settings
No entry point and/or command is required in the image definition, as Cloud
@@ -58,4 +58,4 @@ Examples of accepted image tags:
- `12.3.2.1-1`
!!! Warning
- `latest` is not considered a valid tag for the image.
+ `latest` is not considered a valid tag for the image.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/credits.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/credits.mdx
index 8d123bca7c6..bb3df044411 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/credits.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/credits.mdx
@@ -20,5 +20,4 @@ developed, and tested by the EnterpriseDB Cloud Native team:
- Gabriele Quaresima
- Philippe Scorsolini
- Jitendra Wadle
-- Adam Wright
-
+- Adam Wright
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
index 80da649d473..af1fb35ff9e 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
@@ -21,6 +21,7 @@ process:
The following PostgreSQL versions are tested:
+* PostgreSQL 14
* PostgreSQL 13
* PostgreSQL 12
* PostgreSQL 11
@@ -41,8 +42,10 @@ and the following suite of E2E tests are performed on that cluster:
* Manage PostgreSQL configuration changes;
* Rolling updates when changing PostgreSQL images;
* Backup and ScheduledBackups execution;
+* Backup and ScheduledBackups execution using Barman Cloud on Azure blob storage;
* Synchronous replication;
* Restore from backup;
+* Restore from backup using Barman Cloud on Azure blob storage;
* Pod affinity using `NodeSelector`;
* Metrics collection;
* JSON log format;
@@ -53,17 +56,10 @@ and the following suite of E2E tests are performed on that cluster:
* Operator High Availability;
* Node drain;
* Primary endpoint switch in case of failover in less than 10 seconds;
- the threshold is raised to 20 seconds on GKE and 30 on AKS;
* Primary endpoint switch in case of switchover in less than 20 seconds;
* Recover from a degraded state in less than 60 seconds;
* Physical replica clusters;
* Storage expansion;
* Data corruption.
-The E2E tests suite is also run for OpenShift versions 4.5, 4.6, and 4.7
-and the latest Kubernetes and PostgreSQL releases on clusters created on the
-following services:
-
-* Google GKE
-* Amazon EKS
-* Microsoft Azure AKS
+The E2E test suite is also run for OpenShift versions 4.6 and 4.7.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/evaluation.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/evaluation.mdx
index 13c0ada93b5..fd52d7bfda0 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/evaluation.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/evaluation.mdx
@@ -38,4 +38,4 @@ by setting in the `spec` section of the `Cluster` deployment configuration file:
- `imageName` to point to the `quay.io/enterprisedb/edb-postgres-advanced` repository
- `licenseKey` to your license key (in the form of a string)
-Please refer to the full example in the [configuration samples](samples.md) section.
+Please refer to the full example in the [configuration samples](samples.md) section.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/expose_pg_services.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/expose_pg_services.mdx
index bca1c0e31ce..8d322282301 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/expose_pg_services.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/expose_pg_services.mdx
@@ -144,4 +144,4 @@ You can access the primary from your machine running:
```sh
psql -h $(minikube ip) -p 5432 -U postgres
-```
+```
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/failure_modes.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/failure_modes.mdx
index edebd978d53..a6c536394eb 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/failure_modes.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/failure_modes.mdx
@@ -17,10 +17,10 @@ Each pod of a `Cluster` has a `postgres` container with a **liveness**
and a **readiness**
[probe](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes).
-The liveness and readiness probes check if the database is up and able to accept
-connections using the superuser credentials.
-The two probes will report a failure if the probe command fails 3 times with a
-10 seconds interval between each check.
+The liveness probe relies on `pg_isready`, while the readiness probe checks if
+the database is up and able to accept connections using the superuser
+credentials. The two probes will report a failure if the probe command fails 3
+times, with a 10-second interval between each check.
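+
+For reference, here is a sketch of a `pg_isready` invocation similar in spirit
+to the liveness check (the exact command run by the operator is not shown here):
+
+```sh
+# exit code 0 means the server is accepting connections
+pg_isready -h localhost -p 5432 -U postgres
+```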
For now, the operator doesn't configure a `startupProbe` on the Pods, since
startup probes have been introduced only in Kubernetes 1.17.
@@ -150,4 +150,4 @@ to solve the problem manually.
!!! Important
In such cases, please do not perform any manual operation without the
- support and assistance of EnterpriseDB engineering team.
+    support and assistance of the EnterpriseDB engineering team.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/images/multi-cluster.png b/advocacy_docs/kubernetes/cloud_native_postgresql/images/multi-cluster.png
new file mode 100644
index 00000000000..cbee5229bc0
--- /dev/null
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/images/multi-cluster.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35bfcbf1a93d05eac13ca242b0681df3cdbc6887e7e232884cb7e7eb78adea9a
+size 202406
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
index b66a05af6b4..c8cb3285285 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
@@ -16,7 +16,6 @@ navigation:
- installation_upgrade
- quickstart
- interactive_demo
- - cloud_setup
- bootstrap
- security
- scheduling
@@ -44,13 +43,12 @@ navigation:
- api_reference
- release_notes
- credits
-
---
**Cloud Native PostgreSQL** is an [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
designed by [EnterpriseDB](https://www.enterprisedb.com)
to manage [PostgreSQL](https://www.postgresql.org/) workloads on any supported [Kubernetes](https://kubernetes.io)
-cluster running in private, public, or hybrid cloud environments.
+cluster running in private, public, hybrid, or multi-cloud environments.
Cloud Native PostgreSQL adheres to DevOps principles and concepts
such as declarative configuration and immutable infrastructure.
@@ -76,18 +74,21 @@ You need a valid license key to use Cloud Native PostgreSQL in production.
users can expect a **"Level V - Auto Pilot"** set of capabilities from the
Cloud Native PostgreSQL Operator.
-## Requirements
+## Supported Kubernetes distributions
-Cloud Native PostgreSQL requires Kubernetes 1.16 or higher, tested on AWS,
-Google, Azure (with multiple availability zones).
+Cloud Native PostgreSQL requires Kubernetes 1.16 or higher.
Cloud Native PostgreSQL has also been certified for
[RedHat OpenShift Container Platform (OCP)](https://www.openshift.com/products/container-platform)
-4.5+ and is available directly from the [RedHat Catalog](https://catalog.redhat.com/).
+4.6+ and is available directly from the [RedHat Catalog](https://catalog.redhat.com/).
OpenShift Container Platform is an open-source distribution of Kubernetes which is
[maintained and commercially supported](https://access.redhat.com/support/policy/updates/openshift#ocp4)
by Red Hat.
+Please refer to the
+["Platform Compatibility"](https://www.enterprisedb.com/product-compatibility#cnp)
+page from the EDB website for a list of the currently supported Kubernetes distributions.
+
## Supported PostgreSQL versions
PostgreSQL and EDB Postgres Advanced 13, 12, 11 and 10 are currently supported.
@@ -111,6 +112,8 @@ PostgreSQL and EDB Postgres Advanced 13, 12, 11 and 10 are currently supported.
* Support for custom TLS certificates (including integration with cert-manager)
* Continuous backup to an S3 compatible object store
* Full recovery and Point-In-Time recovery from an S3 compatible object store backup
+* Replica clusters for PostgreSQL deployments across multiple Kubernetes
+ clusters, enabling private, public, hybrid, and multi-cloud architectures
* Support for Synchronous Replicas
* Support for node affinity via `nodeSelector`
* Native customizable exporter of user defined metrics for Prometheus through the `metrics` port (9187)
@@ -128,4 +131,4 @@ please consult the ["Before you start" section](before_you_start.md).
!!! Note
Although the guide primarily addresses Kubernetes, all concepts can
- be extended to OpenShift as well.
+ be extended to OpenShift as well.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx
index f99d1d9cc06..e2ec7181d7e 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx
@@ -11,12 +11,12 @@ product: 'Cloud Native Operator'
The operator can be installed like any other resource in Kubernetes,
through a YAML manifest applied via `kubectl`.
-You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.7.1.yaml)
+You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.8.0.yaml)
as follows:
```sh
kubectl apply -f \
- https://get.enterprisedb.io/cnp/postgresql-operator-1.7.1.yaml
+ https://get.enterprisedb.io/cnp/postgresql-operator-1.8.0.yaml
```
Once you have run the `kubectl` command, Cloud Native PostgreSQL will be installed in your Kubernetes cluster.
@@ -137,7 +137,7 @@ users need to complete the rolling update by manually promoting a new instance
through the `cnp` plugin for `kubectl`.
!!! Seealso "Rolling updates"
- This process is discussed in-depth in the [Rolling Updates](rolling_update.md) page.
+ This process is discussed in-depth on the [Rolling Updates](rolling_update.md) page.
!!! Important
In case `primaryUpdateStrategy` is set to the default value of `unsupervised`,
@@ -147,18 +147,18 @@ through the `cnp` plugin for `kubectl`.
### Compatibility among versions
We strive to maintain compatibility between different operator versions, but in
-some cases this might not be possible.
+some cases, this might not be possible.
Every version of the operator is compatible with the previous one, unless
[release notes](release_notes.md) state the opposite.
The release notes page indeed contains a detailed list of the changes introduced
in every released version of the Cloud Native PostgreSQL Operator, and it must
be read before upgrading to a newer version of the software.
-Most versions are directly upgradable and in that case applying the newer
+Most versions are directly upgradable and in that case, applying the newer
manifest for plain Kubernetes installations or using the native package
manager of the chosen distribution is enough.
-When versions are not directly upgradable, the old version need to be
+When versions are not directly upgradable, the old version needs to be
removed before installing the new one. This won't affect user data but
only the operator itself. Please consult the release notes for
detailed information on how to upgrade to any released version.
@@ -181,11 +181,11 @@ kubectl delete deployments \
!!! Warning
Remember to install the new version of the operator after having performed
- the above command. Otherwise, your PostgreSQL clusters will keep running
+ the above command. Otherwise, your PostgreSQL clusters will keep running
without an operator and, as such, without any self-healing and high-availability
capabilities.
!!! Note
In case you deployed the operator in a different namespace than the default
(`postgresql-operator-system`), you need to use the correct namespace for
- the `-n` option in the above command.
+ the `-n` option in the above command.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx
index 4e62638c45c..26046fe4b3c 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx
@@ -2,7 +2,7 @@
title: "Installation, Configuration and Deployment Demo"
description: "Walk through the process of installing, configuring and deploying the Cloud Native PostgreSQL Operator via a browser-hosted Minikube console"
navTitle: Install, Configure, Deploy
-product: 'Cloud Native PostgreSQL Operator'
+product: 'Cloud Native Operator'
platform: ubuntu
tags:
- postgresql
@@ -65,7 +65,7 @@ You will see one node called `minikube`. If the status isn't yet "Ready", wait f
Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation_upgrade.md) section:
```shell
-kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.7.1.yaml
+kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.8.0.yaml
__OUTPUT__
namespace/postgresql-operator-system created
customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created
@@ -165,13 +165,13 @@ metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"postgresql.k8s.enterprisedb.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"instances":3,"primaryUpdateStrategy":"unsupervised","storage":{"size":"1Gi"}}}
- creationTimestamp: "2021-07-13T06:48:56Z"
+ creationTimestamp: "2021-09-14T00:13:02Z"
generation: 1
name: cluster-example
namespace: default
- resourceVersion: "2270"
+ resourceVersion: "2338"
selfLink: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/clusters/cluster-example
- uid: 405c5b71-6e9b-4baf-a186-2086bbb23a2b
+ uid: 47475a76-c5ee-442c-923b-927cf52f8a89
spec:
affinity:
podAntiAffinityType: preferred
@@ -180,11 +180,11 @@ spec:
initdb:
database: app
owner: app
- imageName: quay.io/enterprisedb/postgresql:13.3
+ imageName: quay.io/enterprisedb/postgresql:13.4
instances: 3
+ logLevel: info
postgresql:
parameters:
- cluster_name: ""
log_destination: csvlog
log_directory: /controller/log
log_filename: postgres
@@ -205,9 +205,9 @@ status:
certificates:
clientCASecret: cluster-example-ca
expirations:
- cluster-example-ca: 2022-07-13 06:43:56 +0000 UTC
- cluster-example-replication: 2022-07-13 06:43:56 +0000 UTC
- cluster-example-server: 2022-07-13 06:43:56 +0000 UTC
+ cluster-example-ca: 2021-12-13 00:08:03 +0000 UTC
+ cluster-example-replication: 2021-12-13 00:08:03 +0000 UTC
+ cluster-example-server: 2021-12-13 00:08:03 +0000 UTC
replicationTLSSecret: cluster-example-replication
serverAltDNSNames:
- cluster-example-rw
@@ -221,11 +221,12 @@ status:
- cluster-example-ro.default.svc
serverCASecret: cluster-example-ca
serverTLSSecret: cluster-example-server
+ configMapResourceVersion: {}
currentPrimary: cluster-example-1
healthyPVC:
- - cluster-example-3
- cluster-example-1
- cluster-example-2
+ - cluster-example-3
instances: 3
instancesStatus:
healthy:
@@ -236,7 +237,7 @@ status:
licenseStatus:
isImplicit: true
isTrial: true
- licenseExpiration: "2021-08-12T06:48:56Z"
+ licenseExpiration: "2021-10-14T00:13:02Z"
licenseStatus: Implicit trial license
repositoryAccess: false
valid: true
@@ -245,12 +246,12 @@ status:
readService: cluster-example-r
readyInstances: 3
secretsResourceVersion:
- applicationSecretVersion: "957"
- clientCaSecretVersion: "953"
- replicationSecretVersion: "955"
- serverCaSecretVersion: "953"
- serverSecretVersion: "954"
- superuserSecretVersion: "956"
+ applicationSecretVersion: "880"
+ clientCaSecretVersion: "876"
+ replicationSecretVersion: "878"
+ serverCaSecretVersion: "876"
+ serverSecretVersion: "877"
+ superuserSecretVersion: "879"
targetPrimary: cluster-example-1
writeService: cluster-example-rw
```
@@ -278,7 +279,7 @@ curl -sSfL \
sudo sh -s -- -b /usr/local/bin
__OUTPUT__
EnterpriseDB/kubectl-cnp info checking GitHub for latest tag
-EnterpriseDB/kubectl-cnp info found version: 1.7.1 for v1.7.1/linux/x86_64
+EnterpriseDB/kubectl-cnp info found version: 1.8.0 for v1.8.0/linux/x86_64
EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp
```
@@ -290,7 +291,7 @@ __OUTPUT__
Cluster in healthy state
Name: cluster-example
Namespace: default
-PostgreSQL Image: quay.io/enterprisedb/postgresql:13.3
+PostgreSQL Image: quay.io/enterprisedb/postgresql:13.4
Primary instance: cluster-example-1
Instances: 3
Ready instances: 3
@@ -325,7 +326,7 @@ __OUTPUT__
Failing over Failing over to cluster-example-2
Name: cluster-example
Namespace: default
-PostgreSQL Image: quay.io/enterprisedb/postgresql:13.3
+PostgreSQL Image: quay.io/enterprisedb/postgresql:13.4
Primary instance: cluster-example-2
Instances: 3
Ready instances: 2
@@ -346,7 +347,7 @@ __OUTPUT__
Cluster in healthy state
Name: cluster-example
Namespace: default
-PostgreSQL Image: quay.io/enterprisedb/postgresql:13.3
+PostgreSQL Image: quay.io/enterprisedb/postgresql:13.4
Primary instance: cluster-example-2
Instances: 3
Ready instances: 3
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/kubernetes_upgrade.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/kubernetes_upgrade.mdx
index 5f54cc93609..b9dd802af2d 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/kubernetes_upgrade.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/kubernetes_upgrade.mdx
@@ -29,7 +29,7 @@ at a time by:
3. re-joining the node to the cluster (`uncordon`)
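+
+A minimal sketch of these steps with `kubectl`, assuming a node named
+`worker-1`:
+
+```sh
+# 1. prevent new workloads from being scheduled on the node
+kubectl cordon worker-1
+# 2. evict the existing workloads (see the note below about local data)
+kubectl drain worker-1 --ignore-daemonsets --delete-local-data
+# 3. once the upgrade is complete, let the node accept workloads again
+kubectl uncordon worker-1
+```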
The above process requires workloads to be either stopped for the
-entire duration of the upgrade or migrated on another node.
+entire duration of the upgrade or migrated to another node.
While the latter case is the expected one in terms of service
reliability and self-healing capabilities of Kubernetes, there can
@@ -87,4 +87,4 @@ the new PostgreSQL instance takes shorter than waiting.
When performing the `kubectl drain` command, you will need
to add the `--delete-local-data` option.
Don't be afraid: it refers to another volume internally used
- by the operator - not the PostgreSQL data directory.
+ by the operator - not the PostgreSQL data directory.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx
index a9235008ed6..17d8854a62b 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx
@@ -81,4 +81,4 @@ Cloud Native PostgreSQL does not currently support synchronization of labels
or annotations after a resource has been created. For example, suppose you
deploy a cluster. When you add a new annotation to be inherited and define it
in the existing cluster, the operator will not automatically set it
-on the associated resources.
+on the associated resources.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx
index bbe9f0d1b79..d551138389a 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx
@@ -15,8 +15,9 @@ trial license - which automatically expires after 30 days.
A license key allows you to create an unlimited number of PostgreSQL
clusters in your installation.
-The license key needs to be available in a `ConfigMap` in the same
-namespace where the operator is deployed.
+The license key needs to be available in a `Secret` in the same namespace where
+the operator is deployed (`ConfigMap` is also available, but not recommended
+for a license key).
!!! Seealso "Operator configuration"
For more information, please refer to the ["Operator configuration"](operator_conf.md) section.
@@ -68,17 +69,18 @@ or to make it available in all namespaces. The second option is
the default one, and the operator will be installed in
`openshift-operators` on OpenShift and `operators` on OperatorHub.
-Given the namespace name, and the license key, you can create
-the config map with the following command:
+You can add a given license key to an entire cluster by creating a secret in
+the desired namespace (`openshift-operators`, `operators`, or the selected one)
+with the following command:
```
-kubectl create configmap -n [NAMESPACE_NAME_HERE] \
+kubectl create secret generic -n [NAMESPACE_NAME_HERE] \
postgresql-operator-controller-manager-config \
--from-literal=EDB_LICENSE_KEY=[LICENSE_KEY_HERE]
```
You'll need to delete the current operator pods. New pods will be
-automatically recreated and will use the configmap:
+automatically recreated and will use the secret:
```sh
kubectl delete pods -n [NAMESPACE_NAME_HERE] \
@@ -108,6 +110,15 @@ status:
A cluster license key can be updated with a new one at any moment, to extend
the expiration date or move the cluster to a production license.
+## License key secret at cluster level
+
+Each `Cluster` resource can also have a `licenseKeySecret` parameter, which contains
+the name and key of a secret. That secret contains the license key provided by EnterpriseDB.
+
+This field takes precedence over `licenseKey`: it is refreshed whenever you
+change the secret, for example to extend the expiration date or to switch from
+a trial license to a production license.
+
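+Here is a minimal sketch of its usage, assuming a secret called
+`license-key-secret` that stores the license key under the `licenseKey` key
+(both names are hypothetical):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  # hypothetical secret name and key
+  licenseKeySecret:
+    name: license-key-secret
+    key: licenseKey
+  storage:
+    size: 1Gi
+```
+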
Cloud Native PostgreSQL is distributed under the EnterpriseDB Limited Usage License
Agreement, available at [enterprisedb.com/limited-use-license](https://www.enterprisedb.com/limited-use-license).
@@ -120,4 +131,4 @@ attempt on the cluster, effectively stopping to manage its status. This also
includes any self-healing and high availability capabilities, such as automated
failover and switchovers.
-The pods and the data will still be available.
+The pods and the data will still be available.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx
index 13ded55490c..8e78ebc9675 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx
@@ -16,6 +16,15 @@ Each log entry has the following fields:
- `record`: the actual record (with structure that varies depending on the
`logger` type)
+## Operator log
+
+A log level can be specified in the cluster spec with the `logLevel` option,
+and can be set to any of `error`, `info` (default), `debug`, or `trace`.
+
+At the moment, the log level can only be set when an instance starts and cannot
+be changed at runtime. If the value is changed in the cluster spec after the
+cluster has started, it will take effect only in new pods, not in existing ones.
+
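+Here is a minimal sketch of setting the log level in a cluster definition:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  # one of: error, info (default), debug, trace
+  logLevel: debug
+  storage:
+    size: 1Gi
+```
+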
## PostgreSQL log
Each entry in the PostgreSQL log is a JSON object having the `logger` key set
@@ -69,10 +78,10 @@ All you need to do is add the required `pgaudit` parameters to the `postgresql`
section in the configuration of the cluster.
!!! Important
- It is unnecessary to add the PGAudit library to `shared_preload_libraries`.
- The library will be added automatically by Cloud Native PostgreSQL based on the
- presence of `pgaudit.*` parameters in the postgresql configuration.
- The operator will detect and manage the addition and removal of the
+ It is unnecessary to add the PGAudit library to `shared_preload_libraries`.
+ The library will be added automatically by Cloud Native PostgreSQL based on the
+ presence of `pgaudit.*` parameters in the postgresql configuration.
+ The operator will detect and manage the addition and removal of the
library from `shared_preload_libraries`.
The operator also takes care of creating and removing the extension from all
@@ -262,4 +271,4 @@ Therefore, all the possible `logger` values are the following ones:
Except for `postgres` and `edb_audit` that have the aforementioned structures,
all other possible values just have `msg` set to the escaped message that is
-logged.
+logged.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx
index 935fda54f09..0b4ec254ed5 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx
@@ -8,7 +8,7 @@ product: 'Cloud Native Operator'
For each PostgreSQL instance, the operator provides an exporter of metrics for
[Prometheus](https://prometheus.io/) via HTTP, on port 9187, named `metrics`.
-The operator comes with a predefined set of metrics, as well as a highly
+The operator comes with a [predefined set of metrics](#predefined-set-of-metrics), as well as a highly
configurable and customizable system to define additional queries via one or
more `ConfigMap` or `Secret` resources (see the
["User defined metrics" section](#user-defined-metrics) below for details).
@@ -19,7 +19,7 @@ Metrics can be accessed as follows:
curl http://<pod-ip>:9187/metrics
```
-All monitoring queries are:
+All monitoring queries that are performed on PostgreSQL are:
- transactionally atomic (one transaction per query)
- executed with the `pg_monitor` role
@@ -65,6 +65,186 @@ spec:
Make sure you modify the example above with a unique name as well as the
correct cluster's namespace and labels (we are using `cluster-example`).
+### Predefined set of metrics
+
+Every PostgreSQL instance exporter automatically exposes a set of predefined
+metrics, which can be classified in two major categories:
+
+- PostgreSQL related metrics, starting with `cnp_collector_*`, including:
+
+ - number of WAL files and total size on disk
+ - number of `.ready` and `.done` files in the archive status folder
+ - requested minimum and maximum number of synchronous replicas, as well as
+ the expected and actually observed values
+ - flag indicating if replica cluster mode is enabled or disabled
+ - flag indicating if a manual switchover is required
+
+- Go runtime related metrics, starting with `go_*`
+
+Below is a sample of the metrics returned by the `localhost:9187/metrics`
+endpoint of an instance. As you can see, the Prometheus format is
+self-documenting:
+
+```text
+# HELP cnp_collector_collection_duration_seconds Collection time duration in seconds
+# TYPE cnp_collector_collection_duration_seconds gauge
+cnp_collector_collection_duration_seconds{collector="Collect.up"} 0.0031393
+
+# HELP cnp_collector_collections_total Total number of times PostgreSQL was accessed for metrics.
+# TYPE cnp_collector_collections_total counter
+cnp_collector_collections_total 2
+
+# HELP cnp_collector_last_collection_error 1 if the last collection ended with error, 0 otherwise.
+# TYPE cnp_collector_last_collection_error gauge
+cnp_collector_last_collection_error 0
+
+# HELP cnp_collector_manual_switchover_required 1 if a manual switchover is required, 0 otherwise
+# TYPE cnp_collector_manual_switchover_required gauge
+cnp_collector_manual_switchover_required 0
+
+# HELP cnp_collector_pg_wal Total size in bytes of WAL segments in the '/var/lib/postgresql/data/pgdata/pg_wal' directory computed as (wal_segment_size * count)
+# TYPE cnp_collector_pg_wal gauge
+cnp_collector_pg_wal{value="count"} 7
+cnp_collector_pg_wal{value="size"} 1.17440512e+08
+
+# HELP cnp_collector_pg_wal_archive_status Number of WAL segments in the '/var/lib/postgresql/data/pgdata/pg_wal/archive_status' directory (ready, done)
+# TYPE cnp_collector_pg_wal_archive_status gauge
+cnp_collector_pg_wal_archive_status{value="done"} 6
+cnp_collector_pg_wal_archive_status{value="ready"} 0
+
+# HELP cnp_collector_replica_mode 1 if the cluster is in replica mode, 0 otherwise
+# TYPE cnp_collector_replica_mode gauge
+cnp_collector_replica_mode 0
+
+# HELP cnp_collector_sync_replicas Number of requested synchronous replicas (synchronous_standby_names)
+# TYPE cnp_collector_sync_replicas gauge
+cnp_collector_sync_replicas{value="expected"} 0
+cnp_collector_sync_replicas{value="max"} 0
+cnp_collector_sync_replicas{value="min"} 0
+cnp_collector_sync_replicas{value="observed"} 0
+
+# HELP cnp_collector_up 1 if PostgreSQL is up, 0 otherwise.
+# TYPE cnp_collector_up gauge
+cnp_collector_up 1
+
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 5.01e-05
+go_gc_duration_seconds{quantile="0.25"} 7.27e-05
+go_gc_duration_seconds{quantile="0.5"} 0.0001748
+go_gc_duration_seconds{quantile="0.75"} 0.0002959
+go_gc_duration_seconds{quantile="1"} 0.0012776
+go_gc_duration_seconds_sum 0.0035741
+go_gc_duration_seconds_count 13
+
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 25
+
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.16.7"} 1
+
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 4.493744e+06
+
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 2.1698216e+07
+
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.456234e+06
+
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 172118
+
+# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
+# TYPE go_memstats_gc_cpu_fraction gauge
+go_memstats_gc_cpu_fraction 1.0749468700447189e-05
+
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 5.530048e+06
+
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 4.493744e+06
+
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 5.8236928e+07
+
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 7.528448e+06
+
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 26306
+
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 5.7401344e+07
+
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.5765376e+07
+
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6311727586032727e+09
+
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 198424
+
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 14400
+
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16384
+
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 191896
+
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 212992
+
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 8.689632e+06
+
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 2.566622e+06
+
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 1.343488e+06
+
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.343488e+06
+
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 7.6891144e+07
+
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 18
+```
### User defined metrics
This feature is currently in *beta* state and the format is inspired by the
@@ -97,7 +277,7 @@ The `customQueriesConfigMap`/`customQueriesSecret` sections contain a list of
Take care that the referred resources have to be created **in the same namespace as the Cluster** resource.
!!! Note
- If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can
+ If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can
add a label with key `k8s.enterprisedb.io/reload` to it, otherwise you will have to reload
the instances using the `kubectl cnp reload` subcommand.
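+
+For instance, here is a sketch of adding the reload label to an existing
+ConfigMap (assuming it is called `example-monitoring`):
+
+```sh
+kubectl label configmap example-monitoring "k8s.enterprisedb.io/reload="
+```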
@@ -121,11 +301,15 @@ data:
THEN 0
ELSE GREATEST (0,
EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())))
- END AS lag"
+ END AS lag,
+ pg_is_in_recovery() AS in_recovery"
metrics:
- lag:
usage: "GAUGE"
description: "Replication lag behind primary in seconds"
+ - in_recovery:
+ usage: "GAUGE"
+ description: "Whether the instance is in recovery"
```
A list of basic monitoring queries can be found in the [`cnp-basic-monitoring.yaml` file](../samples/cnp-basic-monitoring.yaml).
@@ -146,7 +330,7 @@ the pattern according to [path.Match()](https://pkg.go.dev/path#Match) rules.
The `*` character has a [special meaning](https://yaml.org/spec/1.2/spec.html#id2786448) in yaml,
so you need to quote (`"*"`) the `target_databases` value when it includes such a pattern.
-It is recommended that you always include the name of the database
+It is recommended that you always include the name of the database
in the returned labels, for example using the `current_database()` function
as in the following example:
@@ -230,6 +414,8 @@ Here is a short description of all the available fields:
- `query`: the SQL query to run on the target database to generate the metrics
- `primary`: whether to run the query only on the primary instance
- `master`: same as `primary` (for compatibility with the Prometheus PostgreSQL exporter's syntax - deprecated)
+ - `runonserver`: a semantic version range to limit the versions of PostgreSQL the query should run on
+ (e.g. `">=10.0.0"` or `">=12.0.0 <=14.0.0"`)
- `target_databases`: a list of databases to run the `query` against,
or a [shell-like pattern](#example-of-a-user-defined-metric-running-on-multiple-databases)
to enable auto discovery. Overwrites the default database if provided.
@@ -249,7 +435,7 @@ The possible values for `usage` are:
| `GAUGE` | use this column as a gauge |
| `MAPPEDMETRIC` | use this column with the supplied mapping of text values |
| `DURATION` | use this column as a text duration (in milliseconds) |
-| `HISTOGRAM` | use this column as an histogram |
+| `HISTOGRAM` | use this column as a histogram |
Please visit the ["Metric Types" page](https://prometheus.io/docs/concepts/metric_types/)
@@ -272,9 +458,13 @@ Considering the `pg_replication` example above, the exporter's endpoint would
return the following output when invoked:
```text
+# HELP cnp_pg_replication_in_recovery Whether the instance is in recovery
+# TYPE cnp_pg_replication_in_recovery gauge
+cnp_pg_replication_in_recovery 0
# HELP cnp_pg_replication_lag Replication lag behind primary in seconds
# TYPE cnp_pg_replication_lag gauge
cnp_pg_replication_lag 0
+
```
### Differences with the Prometheus Postgres exporter
@@ -285,7 +475,6 @@ are defined in the official Prometheus exporter are not implemented in Cloud
Native PostgreSQL's exporter:
- `cache_seconds`: number of seconds to cache the result of the query
-- `runonserver`: a semantic version range to limit the versions of PostgreSQL the query should run on (e.g. `">=10.0.0"`)
Similarly, the `pg_version` field of a column definition is not implemented.
@@ -321,4 +510,4 @@ spec:
app.kubernetes.io/name: cloud-native-postgresql
podMetricsEndpoints:
- port: metrics
-```
+```
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx
index f949794e8c0..bab00886ef5 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx
@@ -355,9 +355,9 @@ version: such a source can be anywhere, as long as a direct streaming
connection via TLS is allowed from the two endpoints.
Moreover, the source can be even outside Kubernetes, running in a physical or
virtual environment.
-Currently, only the `pg_basebackup` bootstrap method is allowed, even though
-future implementations will enable bootstrap from a backup, as well as
-WAL file shipping instead/on top of WAL streaming.
+Replica clusters can be created from a recovery object store (backup in Barman
+Cloud format) or via streaming through `pg_basebackup`. Both WAL file shipping
+and WAL streaming are allowed.
Replica clusters dramatically improve the business continuity posture of your
PostgreSQL databases in Kubernetes, spanning over multiple datacenters and
opening up for hybrid and multi-cloud setups (currently, manual switchover
@@ -443,7 +443,7 @@ PostgreSQL that can be integrated and adapted to your context.
### Standard output logging of PostgreSQL error messages in JSON format
-Every log message is delivered to standard output in JSON format, with first level
+Every log message is delivered to standard output in JSON format, with the first level
definition of the timestamp, the log level and the type of log entry, such as
`postgres` for the canonical PostgreSQL error message channel.
As a result, every Pod managed by Cloud Native PostgreSQL can be easily and directly
@@ -452,7 +452,7 @@ data type.
### Real-time query monitoring
-Cloud Native PostgreSQL transparently and natively manages support for:
+Cloud Native PostgreSQL transparently and natively supports:
- the essential [`pg_stat_statements` extension](https://www.postgresql.org/docs/current/pgstatstatements.html),
which enables tracking of planning and execution statistics of all SQL
@@ -480,7 +480,7 @@ the `kubectl describe` and `kubectl get events` command.
## Level 5 - Auto Pilot
Capability level 5 is focused on **automated scaling**, **healing** and
-**tuning** - through the discovery of anomalies and insights emerged
+**tuning** - through the discovery of anomalies and insights that emerge
from the observability layer.
### Automated Failover for self-healing
@@ -497,4 +497,4 @@ the server and restarting it as a standby.
### Automated recreation of a standby
In case the pod hosting a standby has been removed, the operator initiates
-the procedure to recreate a standby server.
+the procedure to recreate a standby server.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_conf.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_conf.mdx
index f5f352fac1a..03db2c8696c 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_conf.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_conf.mdx
@@ -15,7 +15,7 @@ to change the default behavior, such as:
by the operator and that are set in the cluster resource
- defining a different default image for PostgreSQL or an additional pull secret
-By the default, the operator is installed in the `postgresql-operator-system`
+By default, the operator is installed in the `postgresql-operator-system`
namespace as a Kubernetes `Deployment` called `postgresql-operator-controller-manager`.
!!! Note
@@ -37,11 +37,11 @@ is located in the same namespace of the operator deployment and with
## Available options
-The operator looks for the following environment variables to be defined in the config map:
+The operator looks for the following environment variables to be defined in the `ConfigMap`/`Secret`:
Name | Description
---- | -----------
-`EDB_LICENSE_KEY` | default license key (to be used only if the cluster does not define one)
+`EDB_LICENSE_KEY` | default license key (to be used only if the cluster does not define one, and preferably in the `Secret`)
`INHERITED_ANNOTATIONS` | list of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods
`INHERITED_LABELS` | list of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods
`PULL_SECRET_NAME` | name of an additional pull secret to be defined in the operator's namespace and to be used to download images
@@ -67,7 +67,6 @@ metadata:
data:
INHERITED_ANNOTATIONS: categories
INHERITED_LABELS: environment, workload, app
- EDB_LICENSE_KEY:
```
## Defining an operator secret
@@ -109,8 +108,8 @@ kubectl delete pods -n [NAMESPACE_NAME_HERE] \
!!! Warning
Customizations will be applied only to `Cluster` resources created
- after the reload of the operator deployment.
+ after the reload of the operator deployment.
Following the above example, if the `Cluster` definition contains a `categories`
annotation and any of the `environment`, `workload`, or `app` labels, these will
-be inherited by all the resources generated by the deployment.
+be inherited by all the resources generated by the deployment.
\ No newline at end of file
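+
+For example, a hedged sketch of a `Cluster` carrying such metadata (the
+annotation and label values are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+  annotations:
+    categories: database
+  labels:
+    environment: production
+    workload: database
+    app: sso
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+```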
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx
index 5a381838d5d..f61f20d9e41 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx
@@ -248,8 +248,8 @@ Fixed rules:
```text
local all all peer
-hostssl postgres streaming_replica all cert clientcert=1
-hostssl replication streaming_replica all cert clientcert=1
+hostssl postgres streaming_replica all cert
+hostssl replication streaming_replica all cert
```
Default rules:
@@ -263,8 +263,8 @@ The resulting `pg_hba.conf` will look like this:
```text
local all all peer
-hostssl postgres streaming_replica all cert clientcert=1
-hostssl replication streaming_replica all cert clientcert=1
+hostssl postgres streaming_replica all cert
+hostssl replication streaming_replica all cert
@@ -370,5 +370,4 @@ Users are not allowed to set the following configuration parameters in the
- `unix_socket_group`
- `unix_socket_permissions`
- `wal_level`
-- `wal_log_hints`
-
+- `wal_log_hints`
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx
index fcd151e0b49..9ef9f846bc9 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx
@@ -9,7 +9,7 @@ using Cloud Native PostgreSQL on a local Kubernetes cluster in
[Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/) or
[Kind](https://kind.sigs.k8s.io/).
-
+
!!! Tip "Live demonstration"
Don't want to install anything locally just yet? Try a demonstration directly in your browser:
@@ -17,6 +17,7 @@ using Cloud Native PostgreSQL on a local Kubernetes cluster in
[Cloud Native PostgreSQL Operator Interactive Quickstart](interactive_demo)
+
Red Hat OpenShift Container Platform users can test the certified operator for
Cloud Native PostgreSQL on the [Red Hat CodeReady Containers (CRC)](https://developers.redhat.com/products/codeready-containers/overview)
for OpenShift.
@@ -182,4 +183,4 @@ spec:
as it might lead to unpredictable scenarios in terms of update
policies and version consistency in the cluster.
For strict deterministic and repeatable deployments, you can add the digests
- to the image name, through the `:@sha256:` format.
+ to the image name, through the `:@sha256:` format.
\ No newline at end of file
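+
+    For example (the digest value is a placeholder):
+
+    ```yaml
+    imageName: quay.io/enterprisedb/postgresql:13.4@sha256:<digest>
+    ```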
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx
index 594ea0a32c7..0411d503f18 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx
@@ -6,6 +6,59 @@ product: 'Cloud Native Operator'
History of user-visible changes for Cloud Native PostgreSQL.
+## Version 1.8.0
+
+**Release date:** 13 September 2021
+
+Features:
+
+- Bootstrap a new cluster via full or Point-In-Time Recovery directly from an
+ object store defined in the external cluster section, eliminating the
+ previous requirement to have a Backup CR defined
+- Introduce the `immediate` option in scheduled backups to request a backup
+  immediately after the first Postgres instance is running, adding the capability
+ to rewind to the very beginning of a cluster when Point-In-Time Recovery is
+ configured
+- Add the `firstRecoverabilityPoint` in the cluster status to report the oldest
+  consistent point in time to which a recovery can be requested, based on the
+  backup object store's content
+- Enhance the default Prometheus exporter for a PostgreSQL instance by exposing
+ the following new metrics:
+
+ 1. number of WAL files and computed total size on disk
+ 2. number of `.ready` and `.done` files in the archive status folder
+ 3. flag for replica mode
+ 4. number of requested minimum/maximum synchronous replicas, as well as
+ the expected and actually observed ones
+
+- Add support for the `runonserver` option when defining custom metrics in the
+ Prometheus exporter to limit the collection of a metric to a range of
+ PostgreSQL versions
+- Natively support Azure Blob Storage for backup and recovery, by taking
+ advantage of the feature introduced in Barman 2.13 for Barman Cloud
+- Rely on `pg_isready` for the liveness probe
+- Support RFC3339 format for timestamp specification in recovery target times
+- Introduce `.spec.imagePullPolicy` to control the pull policy of image
+ containers for all pods and jobs created for a cluster
+- Add support for OpenShift 4.8, which replaces OpenShift 4.5
+- Support PostgreSQL 14 (beta)
+- Enhance the replica cluster feature with cross-cluster replication from an
+ object store defined in an external cluster section, without requiring a
+ streaming connection (experimental)
+- Introduce the `logLevel` option in the cluster's spec to specify one of the
+ following levels: error, info, debug or trace
+
+Security Enhancements:
+
+- Introduce `.spec.enableSuperuserAccess` to enable/disable network access with the
+ `postgres` user through password authentication
+- Enable specification of a license key in a secret with `spec.licenseKeySecret`
+
+Fixes:
+
+- Properly inform users when a cluster enters an unrecoverable state and
+ requires human intervention
+
## Version 1.7.1
**Release date:** 11 August 2021
@@ -298,4 +351,4 @@ Kubernetes with the following main capabilities:
- Full recovery and point-in-time recovery from an S3 compatible object store backup
- Support for synchronous replicas
- Support for node affinity via `nodeSelector` property
-- Standard output logging of PostgreSQL error messages
+- Standard output logging of PostgreSQL error messages
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/replication.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/replication.mdx
index 10d7bdcbb69..bbe7476ff73 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/replication.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/replication.mdx
@@ -5,16 +5,16 @@ product: 'Cloud Native Operator'
---
Physical replication is one of the strengths of PostgreSQL and one of the
-reasons why some of the world's largest organizations in the world have chosen
+reasons why some of the world's largest organizations have chosen
it for the management of their data in business continuity contexts.
-Primarily used to achieve high availability, physical replication also allows
-scale-out of read-only workloads and offloading some work from the primary.
+Primarily used to achieve high availability, physical replication also allows
+scaling out read-only workloads and offloading some work from the primary.
## Application-level replication
-Having contributed throughout the years to the replication feature in PostgreSQL,
-we have decided to build high availability in Cloud Native PostgreSQL on top of
-the native physical replication technology and integrate it
+Having contributed throughout the years to the replication feature in PostgreSQL,
+we have decided to build high availability in Cloud Native PostgreSQL on top of
+the native physical replication technology and integrate it
directly in the Kubernetes API.
In Kubernetes terms, this is referred to as **application-level replication**, in
@@ -23,7 +23,7 @@ contrast with *storage-level replication*.
## A very mature technology
PostgreSQL has a very robust and mature native framework for replicating data
-from the primary instance to one or more replicas which is built around the
+from the primary instance to one or more replicas, built around the
concept of transactional changes continuously stored in the WAL (Write Ahead Log).
Started as the evolution of crash recovery and point in time recovery
@@ -32,13 +32,15 @@ technologies, physical replication was first introduced in PostgreSQL 8.2
continuous recovery.
PostgreSQL 9.0 (2010) enhanced it with WAL streaming and read-only replicas via
-*hot standby*, while 9.1 (2011) introduced synchronous replication at
+*hot standby*, while 9.1 (2011) introduced synchronous replication at the
transaction level (for RPO=0 clusters). Cascading replication was released with
PostgreSQL 9.2 (2012). The foundations of logical replication were laid in
PostgreSQL 9.4, while version 10 (2017) introduced native support for the
publisher/subscriber pattern to replicate data from an origin to a destination.
-## Streaming replication support
+## Replication within a PostgreSQL cluster
+
+### Streaming replication support
At the moment, Cloud Native PostgreSQL natively and transparently manages
physical streaming replicas within a cluster in a declarative way, based on
@@ -67,8 +69,8 @@ excerpt taken from `pg_hba.conf`:
```
# Require client certificate authentication for the streaming_replica user
-hostssl postgres streaming_replica all cert clientcert=1
-hostssl replication streaming_replica all cert clientcert=1
+hostssl postgres streaming_replica all cert
+hostssl replication streaming_replica all cert
```
!!! Seealso "Certificates"
@@ -77,14 +79,14 @@ hostssl replication streaming_replica all cert clientcert=1
in the documentation.
-## Continuous backup integration
+### Continuous backup integration
In case continuous backup is configured in the cluster, Cloud Native PostgreSQL
transparently configures replicas to take advantage of `restore_command` when
in continuous recovery. As a result, PostgreSQL is able to use the WAL archive
as a fallback option every time pulling WALs via streaming replication fails.
-## Synchronous replication
+### Synchronous replication
Cloud Native PostgreSQL supports configuration of **quorum-based synchronous
streaming replication** via two configuration options called `minSyncReplicas`
@@ -127,3 +129,53 @@ requested number of synchronous standbys in the list*.
synchronous replication only in clusters with 3+ instances or,
more generally, when `maxSyncReplicas < (instances - 1)`.
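+
+As an example, the following minimal sketch configures quorum-based
+synchronous replication on a three-instance cluster (the name and storage
+size are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example-sync
+spec:
+  instances: 3
+  minSyncReplicas: 1
+  maxSyncReplicas: 1
+  storage:
+    size: 1Gi
+```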
+## Replication from an external PostgreSQL cluster
+
+Cloud Native PostgreSQL relies on the foundations of the PostgreSQL replication
+framework even when a PostgreSQL cluster is created from an existing one (source)
+and kept synchronized through the
+[replica cluster](architecture.md#multi-cluster-deployments) feature. The source
+can be a primary cluster or another replica cluster (cascading replica cluster).
+
+The available replication options, both at bootstrap time and during
+continuous recovery, are:
+
+- use streaming replication between the replica cluster and the source
+  (this will certainly require some administrative and security-related
+  work to make sure that the network connection between the
+  two clusters is correctly set up)
+- use a Barman Cloud object store for recovery of the base backups and
+ the WAL files that are regularly shipped from the source to the object
+ store and pulled by `barman-cloud-wal-restore` in the replica cluster
+- either of the two
+
+All you have to do is define an external cluster.
+Please refer to the ["Bootstrap" section](bootstrap.md#bootstrap-from-another-cluster)
+for information on how to clone a PostgreSQL server using either
+`pg_basebackup` (streaming) or `recovery` (object store).
+
+If the external cluster contains a `barmanObjectStore` section:
+
+- you'll be able to bootstrap the replica cluster from an object store
+ using the `recovery` section
+- Cloud Native PostgreSQL will automatically set the `restore_command`
+ in the designated primary instance
+
+If the external cluster contains a `connectionParameters` section:
+
+- you'll be able to bootstrap the replica cluster via streaming replication
+ using the `pg_basebackup` section
+- Cloud Native PostgreSQL will automatically set the `primary_conninfo`
+ option in the designated primary instance, so that a WAL receiver
+ process is started to connect to the source cluster and receive data
+
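+As a sketch, an `externalClusters` entry providing both options might look
+like the following excerpt of a `Cluster` spec (names, endpoint, and secret
+keys are illustrative):
+
+```yaml
+spec:
+  externalClusters:
+  - name: source-cluster
+    barmanObjectStore:
+      destinationPath: s3://backups/
+      endpointURL: http://minio:9000
+      s3Credentials:
+        accessKeyId:
+          name: minio
+          key: ACCESS_KEY_ID
+        secretAccessKey:
+          name: minio
+          key: ACCESS_SECRET_KEY
+    connectionParameters:
+      host: source-cluster-rw.default.svc
+      user: streaming_replica
+      dbname: postgres
+```
+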
+You have full flexibility to choose your preferred
+distributed architecture for a PostgreSQL database:
+
+- a private cloud spanning multiple Kubernetes clusters in different data
+  centers
+- a public cloud spanning multiple Kubernetes clusters in different
+  regions
+- a mix of the previous two (hybrid)
+- a public cloud spanning multiple Kubernetes clusters in different
+  regions and on different Cloud Service Providers
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx
index 9fd4381fbe9..daec60d53cf 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx
@@ -48,7 +48,7 @@ while creating a cluster:
- Specify your required values for memory and CPU in the resources section of the manifest file.
This way, you can avoid the `OOM Killed` (where "OOM" stands for Out Of Memory) and `CPU throttle` or any other
- resources related issues on running instances.
+ resource-related issues on running instances.
- For your cluster's pods to get assigned to the "Guaranteed" QoS class, you must set limits and requests
for both memory and CPU to the same value.
- Specify your required PostgreSQL memory parameters consistently with the pod resources (as you would do
@@ -98,4 +98,4 @@ section in the PostgreSQL documentation.
!!! Seealso "Managing Compute Resources for Containers"
For more details on resource management, please refer to the
["Managing Compute Resources for Containers"](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
- page from the Kubernetes documentation.
+ page from the Kubernetes documentation.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/rolling_update.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/rolling_update.mdx
index b6fb6400a8b..60d33f862e5 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/rolling_update.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/rolling_update.mdx
@@ -43,4 +43,4 @@ re-clone the data. Pods will be deleted and created again with the same PVCs.
During the rolling update procedure, the service endpoints move to reflect
the cluster's status, so that applications ignore the node that
-is updating.
+is updating.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/samples.mdx
index 3c3da7196ca..1ee3ab398e7 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples.mdx
@@ -19,4 +19,4 @@ In this section, you can find some examples of configuration files to set up you
* [`cluster-example-full.yaml`](../samples/cluster-example-full.yaml):
an example of `Cluster` that sets most of the available options.
-For a list of available options, please refer to the ["API Reference" page](api_reference.md).
+For a list of available options, please refer to the ["API Reference" page](api_reference.md).
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml
index 1948023f23a..ac8d711cbbd 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml
@@ -5,7 +5,8 @@
apiVersion: v1
data:
- password: cGFzc3dvcmQ=
+ password: VHhWZVE0bk44MlNTaVlIb3N3cU9VUlp2UURhTDRLcE5FbHNDRUVlOWJ3RHhNZDczS2NrSWVYelM1Y1U2TGlDMg==
+ username: YXBw
kind: Secret
metadata:
name: cluster-example-app-user
@@ -13,7 +14,8 @@ type: kubernetes.io/basic-auth
---
apiVersion: v1
data:
- password: cGFzc3dvcmQ=
+ password: dU4zaTFIaDBiWWJDYzRUeVZBYWNCaG1TemdxdHpxeG1PVmpBbjBRSUNoc0pyU211OVBZMmZ3MnE4RUtLTHBaOQ==
+ username: cG9zdGdyZXM=
kind: Secret
metadata:
name: cluster-example-superuser
@@ -33,7 +35,7 @@ metadata:
name: cluster-example-full
spec:
description: "Example of cluster"
- imageName: quay.io/enterprisedb/postgresql:13.3
+ imageName: quay.io/enterprisedb/postgresql:13.4
# imagePullSecret is only required if the images are located in a private registry
# imagePullSecrets:
# - name: private_registry_access
@@ -54,8 +56,8 @@ spec:
bootstrap:
initdb:
- database: appdb
- owner: appuser
+ database: app
+ owner: app
secret:
name: cluster-example-app-user
# Alternative bootstrap method: start from a backup
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-monitoring.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-monitoring.yaml
index 722b9a5916b..0844cfcfbcb 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-monitoring.yaml
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-monitoring.yaml
@@ -26,11 +26,20 @@ metadata:
data:
custom-queries: |
pg_replication:
- query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag"
+ query: "SELECT CASE WHEN NOT pg_is_in_recovery()
+ THEN 0
+ ELSE GREATEST (0,
+ EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())))
+ END AS lag,
+ pg_is_in_recovery() AS in_recovery"
metrics:
- lag:
usage: "GAUGE"
description: "Replication lag behind primary in seconds"
+ - in_recovery:
+ usage: "GAUGE"
+ description: "Whether the instance is in recovery"
+
pg_postmaster:
query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()"
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-secret.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-secret.yaml
index 991261172d5..f6f6ea16802 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-secret.yaml
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-secret.yaml
@@ -7,8 +7,8 @@ spec:
bootstrap:
initdb:
- database: appdb
- owner: appuser
+ database: app
+ owner: app
secret:
name: cluster-example-app-user
@@ -20,6 +20,7 @@ spec:
---
apiVersion: v1
data:
+ username: YXBw
password: cGFzc3dvcmQ=
kind: Secret
metadata:
@@ -28,6 +29,7 @@ type: kubernetes.io/basic-auth
---
apiVersion: v1
data:
+ username: cG9zdGdyZXM=
password: cGFzc3dvcmQ=
kind: Secret
metadata:
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-async.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-async.yaml
new file mode 100644
index 00000000000..feaa30fc991
--- /dev/null
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-async.yaml
@@ -0,0 +1,30 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-replica-async
+spec:
+ instances: 3
+
+ bootstrap:
+ recovery:
+ source: postgresql-bkp
+
+ replica:
+ enabled: true
+ source: postgresql-bkp
+
+ storage:
+ size: 1Gi
+
+ externalClusters:
+ - name: postgresql-bkp
+ barmanObjectStore:
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ s3Credentials:
+ accessKeyId:
+ name: minio
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: minio
+ key: ACCESS_SECRET_KEY
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-restore.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-restore.yaml
new file mode 100644
index 00000000000..7520d1ba385
--- /dev/null
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-restore.yaml
@@ -0,0 +1,44 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-replica-from-restore
+spec:
+ instances: 3
+
+ bootstrap:
+ recovery:
+ source: postgresql-bkp
+
+ replica:
+ enabled: true
+ source: postgresql-bkp
+
+ storage:
+ size: 1Gi
+
+ externalClusters:
+ - name: postgresql-bkp
+ barmanObjectStore:
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ s3Credentials:
+ accessKeyId:
+ name: minio
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: minio
+ key: ACCESS_SECRET_KEY
+ connectionParameters:
+ host: postgresql-bkp-rw.default.svc
+ user: streaming_replica
+ sslmode: verify-full
+ dbname: postgres
+ sslKey:
+ name: postgresql-bkp-replication
+ key: tls.key
+ sslCert:
+ name: postgresql-bkp-replication
+ key: tls.crt
+ sslRootCert:
+ name: postgresql-bkp-ca
+ key: ca.crt
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-restore-external-cluster.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-restore-external-cluster.yaml
new file mode 100644
index 00000000000..1331832d5e9
--- /dev/null
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-restore-external-cluster.yaml
@@ -0,0 +1,26 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore
+spec:
+ instances: 3
+
+ storage:
+ size: 5Gi
+
+ bootstrap:
+ recovery:
+ source: postgresql-bkp
+
+ externalClusters:
+ - name: postgresql-bkp
+ barmanObjectStore:
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ s3Credentials:
+ accessKeyId:
+ name: minio
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: minio
+ key: ACCESS_SECRET_KEY
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cnp-basic-monitoring.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cnp-basic-monitoring.yaml
index 75c6a220c4f..7435fd73447 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cnp-basic-monitoring.yaml
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cnp-basic-monitoring.yaml
@@ -79,7 +79,8 @@ data:
query: |
SELECT datname
, pg_database_size(datname) AS size_bytes
- , age(datfrozenxid) AS age
+ , age(datfrozenxid) AS xid_age
+ , mxid_age(datminmxid) AS mxid_age
FROM pg_database
metrics:
- datname:
@@ -88,9 +89,12 @@ data:
- size_bytes:
usage: "GAUGE"
description: "Disk space used by the database"
- - age:
+ - xid_age:
usage: "GAUGE"
description: "Number of transactions from the frozen XID to the current one"
+ - mxid_age:
+ usage: "GAUGE"
+ description: "Number of multiple transactions (Multixact) from the frozen XID to the current one"
pg_postmaster:
query: |
@@ -102,11 +106,19 @@ data:
description: "Time at which postgres started (based on epoch)"
pg_replication:
- query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag"
+ query: "SELECT CASE WHEN NOT pg_is_in_recovery()
+ THEN 0
+ ELSE GREATEST (0,
+ EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())))
+ END AS lag,
+ pg_is_in_recovery() AS in_recovery"
metrics:
- lag:
usage: "GAUGE"
description: "Replication lag behind primary in seconds"
+ - in_recovery:
+ usage: "GAUGE"
+ description: "Whether the instance is in recovery"
pg_replication_slots:
query: "SELECT slot_name, database, active, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) FROM pg_replication_slots"
@@ -472,9 +484,11 @@ data:
pg_settings:
query: |
- SELECT name, setting
+ SELECT name,
+ CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting
FROM pg_settings
- WHERE vartype NOT IN ('string', 'enum', 'bool')
+ WHERE vartype IN ('integer', 'real', 'bool')
+ ORDER BY 1
metrics:
- name:
usage: "LABEL"
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx
index b8cd2ceeb41..897fbe5bc79 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx
@@ -10,7 +10,7 @@ the best node possible, based on several criteria.
!!! Seealso "Kubernetes documentation"
Please refer to the
[Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/)
- for more information on scheduling, including all the available policies. In
+ for more information on scheduling, including all the available policies. On
this page we assume you are familiar with concepts like affinity,
anti-affinity, node selectors, and so on.
@@ -62,7 +62,7 @@ metadata:
name: cluster-example
spec:
instances: 3
- imageName: quay.io/enterprisedb/postgresql:13.3
+ imageName: quay.io/enterprisedb/postgresql:13.4
affinity:
enablePodAntiAffinity: true #default value
@@ -87,7 +87,7 @@ available (which is an expected condition when using
for automated horizontal scaling of a Kubernetes cluster).
!!! Seealso "Inter-pod affinity and anti-affinity"
- More information on this topic in the
+ More information on this topic is in the
[Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity).
Another possible value for `topologyKey` in a cloud environment can be
@@ -109,7 +109,7 @@ if enabled, or passed transparently otherwise.
You have to pass to `additionalPodAntiAffinity` or `additionalPodAffinity`
the whole content of `podAntiAffinity` or `podAffinity` that is expected by the
Pod spec (please look at the following YAML as an example of having only one
- instance of PostgreSQL running on every worker node, regardless which
+ instance of PostgreSQL running on every worker node, regardless of which
PostgreSQL cluster they belong to).
```yaml
@@ -136,16 +136,16 @@ on nodes that have those labels.
## Tolerations
-Kubernetes allows you to specify, through `taints`, whether a node should repel
+Kubernetes allows you to specify (through `taints`) whether a node should repel
all pods not explicitly tolerating (through `tolerations`) their `taints`.
-So, by setting a proper set `tolerations` for a workload matching a specific
-node's `taints`, Kubernetes scheduler will take into consideration also the
-tainted node, while deciding on which node to schedule the new workload.
+So, by setting a proper set of `tolerations` for a workload matching a specific
+node's `taints`, the Kubernetes scheduler will also consider the
+tainted node when deciding on which node to schedule the workload.
Tolerations can be configured for all the pods of a Cluster through the
`.spec.affinity.tolerations` section, which accepts the usual Kubernetes syntax
for tolerations.
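+
+As a sketch, a `Cluster` excerpt tolerating a hypothetical
+`dedicated=postgres:NoSchedule` taint (the taint key, value, and effect are
+illustrative assumptions):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example-toleration
+spec:
+  instances: 3
+  affinity:
+    tolerations:
+    - key: dedicated
+      operator: Equal
+      value: postgres
+      effect: NoSchedule
+  storage:
+    size: 1Gi
+```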
!!! Seealso "Taints and Tolerations"
More information on taints and tolerations can be found in the
- [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+ [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx
index f811f77de14..75eb54d0bbe 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx
@@ -10,7 +10,7 @@ that are analyzed at 3 different layers: Code, Container and Cluster.
!!! Warning
The information contained in this page must not exonerate you from
performing regular InfoSec duties on your Kubernetes cluster. Please
- familiarize with the ["Overview of Cloud Native Security"](https://kubernetes.io/docs/concepts/security/overview/)
+ familiarize yourself with the ["Overview of Cloud Native Security"](https://kubernetes.io/docs/concepts/security/overview/)
page from the Kubernetes documentation.
!!! Seealso "About the 4C's Security Model"
@@ -64,7 +64,7 @@ The following guidelines and frameworks have been taken into account for contain
!!! Seealso "About the Container level security"
Please refer to ["Security and Containers in Cloud Native PostgreSQL"](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql)
blog article for more information about the approach that EDB has taken on
- security at container level in Cloud Native PostgreSQL.
+ security at the container level in Cloud Native PostgreSQL.
## Cluster
@@ -139,7 +139,17 @@ operand | 5432 | PostgreSQL instance | `postgresql` | o
The current implementation of Cloud Native PostgreSQL automatically creates
passwords and `.pgpass` files for the `postgres` superuser and the database owner.
-See the ["Secrets" section in the "Architecture" page](architecture.md#secrets).
+
+You can disable management of the `postgres` user password via secrets by setting
+`enableSuperuserAccess` to `false`.
+
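+For example, a minimal sketch of a cluster with superuser access disabled
+(the name and storage size are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  enableSuperuserAccess: false
+  storage:
+    size: 1Gi
+```
+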
+!!! Note
+ The operator supports toggling the `enableSuperuserAccess` option. When you
+ disable it on a running cluster, the operator will ignore the content of the secret,
+ remove it (if previously generated by the operator) and set the password of the
+ `postgres` user to `NULL` (de facto disabling remote access through password authentication).
+
+See the ["Secrets" section in the "Architecture" page](architecture.md#secrets) for more information.
You can use those files to configure application access to the database.
@@ -158,4 +168,4 @@ For further detail on how `pg_hba.conf` is managed by the operator, see the
["PostgreSQL Configuration" page](postgresql_conf.md#the-pg_hba-section) of the documentation.
!!! Important
- Examples assume that the Kubernetes cluster runs in a private and secure network.
+ Examples assume that the Kubernetes cluster runs in a private and secure network.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
index 0067917f96e..983806dc0f4 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
@@ -9,13 +9,13 @@ product: 'Cloud Native Operator'
page for more details on how Cloud Native PostgreSQL supports TLS certificates.
The Cloud Native PostgreSQL operator has been designed to work with TLS/SSL for both encryption in transit and
-authentication, on server and client sides. Clusters created using the CNP operator come with a Certification
+authentication, on server and client sides. Clusters created using the CNP operator come with a Certification
Authority (CA) to create and sign TLS client certificates. Through the `cnp` plugin for `kubectl` you can
issue a new TLS client certificate which can be used to authenticate a user instead of using passwords.
Please refer to the following steps to authenticate via TLS/SSL certificates, which assume you have
installed a cluster using the [cluster-example.yaml](../samples/cluster-example.yaml) deployment manifest.
-According to the convention over configuration paradigm, that file automatically creates a `app` database
+According to the convention over configuration paradigm, that file automatically creates an `app` database
which is owned by a user called `app` (you can change this convention through the `initdb` configuration
in the `bootstrap` section).
@@ -125,8 +125,8 @@ spec:
This Pod will mount secrets managed by the Cloud Native PostgreSQL operator, including:
-* `sslcert`: the TLS client public certificate
-* `sslkey`: the TLS client certificate private key
+* `sslcert`: the TLS client public certificate
+* `sslkey`: the TLS client certificate private key
* `sslrootcert`: the TLS Certification Authority certificate, that signed the certificate on
the server to be used to verify the identity of the instances
@@ -167,7 +167,7 @@ Output :
version
--------------------------------------------------------------------------------------
------------------
-PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat
+PostgreSQL 13.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat
8.3.1-5), 64-bit
(1 row)
-```
+```
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
index 2762184465c..0f1469cee26 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
@@ -62,7 +62,7 @@ Briefly, `cnp-bench` is designed to operate at two levels:
(when we no longer have the luxury of running these kinds of tests). Databases indeed
change and evolve over time, and so does the distribution of data, potentially affecting
performance: knowing the theoretical maximum throughput of sequential reads or
- writes will turn out to be extremely useful in those situations. Especially in
+   writes will turn out to be extremely useful in those situations, especially in
shared-nothing contexts, where results do not vary due to the influence of external workloads.
**Know your system, benchmark it.**
@@ -89,7 +89,7 @@ spec:
Using the previous configuration, the generated PVCs will be satisfied by the default storage
class. If the target Kubernetes cluster has no default storage class, or even if you need your PVCs
-to satisfied by a known storage class, you can set it into the custom resource:
+to be satisfied by a known storage class, you can set it in the custom resource:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -172,7 +172,7 @@ cluster-example-2 1/1 Running 0 2m22s
cluster-example-3 1/1 Running 0 2m10s
```
-An Azure disk can only be expanded while in "unattached" state, as described in the
+An Azure disk can only be expanded while in "unattached" state, as described in the
[docs](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/known-issues/sizegrow.md).
This means that, to resize a disk used by a PostgreSQL cluster, you will need to perform a manual rollout,
first cordoning the node that hosts the Pod using the PVC bound to the disk. This will prevent the Operator
@@ -241,8 +241,8 @@ kubectl get pvc cluster-example-3 -o=jsonpath='{.status.capacity.storage}'
So, you can repeat these steps for the remaining Pods.
!!! Important
- Please leave the resizing of the disk associated with the primary instance as last disk,
- after promoting through a switchover a new resized Pod, using `kubectl cnp promote`
+    Please resize the disk associated with the primary instance last,
+    after promoting an already resized Pod to primary through a switchover, using `kubectl cnp promote`
(e.g. `kubectl cnp promote cluster-example 3` to promote `cluster-example-3` to primary).
### Recreating storage
@@ -299,4 +299,4 @@ cluster-example-1 1/1 Running 0 5m58s
cluster-example-2 1/1 Running 0 5m43s
cluster-example-4-join-v2bfg 0/1 Completed 0 17s
cluster-example-4 1/1 Running 0 10s
-```
+```
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/use_cases.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/use_cases.mdx
index 862ace48153..dc97c35df1e 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/use_cases.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/use_cases.mdx
@@ -47,4 +47,4 @@ resource in Kubernetes.
The application can still benefit from a TLS connection to PostgreSQL.
-![Application outside Kubernetes](./images/apps-outside-k8s.png)
+![Application outside Kubernetes](./images/apps-outside-k8s.png)
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/GuideTemplate.zip b/advocacy_docs/partner_docs/GuideTemplate.zip
deleted file mode 100644
index 08c893df777..00000000000
--- a/advocacy_docs/partner_docs/GuideTemplate.zip
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:86efd5ea0d1cc926a397dfe1c04d5eabe31888ffbe1bdb3f0eefcff62c314074
-size 60547
diff --git a/advocacy_docs/partner_docs/GuideTemplate/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/GuideTemplate/02-PartnerInformation.mdx
deleted file mode 100644
index 364f2a967e9..00000000000
--- a/advocacy_docs/partner_docs/GuideTemplate/02-PartnerInformation.mdx
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 'Partner Information'
-description: 'Providing a general overview of (company) and the (product)'
-
----
-| | |
-| ----------- | ----------- |
-| **Partner Name** | |
-| **Partner Product** | |
-| **Web Site** | |
-| **Version & Platform** | |
-| **Product Description** | |
-| **Version & Platform** | |
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/GuideTemplate/03-SolutionSummary.mdx b/advocacy_docs/partner_docs/GuideTemplate/03-SolutionSummary.mdx
deleted file mode 100644
index 8a0cabb6504..00000000000
--- a/advocacy_docs/partner_docs/GuideTemplate/03-SolutionSummary.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: 'Solution Summary'
-description: 'A brief explanation of the solution and its purpose'
----
-(Insert explanation of solution and diagram of solution below)
-
-
-
-
-
-
-
-!!! Note
- EDB Postgres Extended with BDR (Bi-Directional Replication)
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/GuideTemplate/04-Implementing.mdx b/advocacy_docs/partner_docs/GuideTemplate/04-Implementing.mdx
deleted file mode 100644
index 6645f073345..00000000000
--- a/advocacy_docs/partner_docs/GuideTemplate/04-Implementing.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
----
-title: 'Implementing (Product)'
-description: 'A walk-through of setting up (blank)'
----
-
-(Insert implementation instructions here):
-
-1.
-
-2.
-
-3.
-
-### Prerequisites
-
-### Configuring (Product Name)
-
-### Installing (Product Name)
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/GuideTemplate/05-Using.mdx b/advocacy_docs/partner_docs/GuideTemplate/05-Using.mdx
deleted file mode 100644
index 5f72da64e60..00000000000
--- a/advocacy_docs/partner_docs/GuideTemplate/05-Using.mdx
+++ /dev/null
@@ -1,7 +0,0 @@
----
-title: 'Using (Product)'
-description: 'Walking through multiple different instances of (insert product name) in use'
----
-
-### Sample User Scenarios
-(Insert product use cases here)
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/GuideTemplate/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/GuideTemplate/06-CertificationEnvironment.mdx
deleted file mode 100644
index 8f4e940fd0a..00000000000
--- a/advocacy_docs/partner_docs/GuideTemplate/06-CertificationEnvironment.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: 'Certification Environment'
-description: 'Providing a general overview of the certification environment used in the implementation of (product)'
----
-
-| | |
-| ----------- | ----------- |
-| **Certification Test Date** | |
-| **EDB Postgres Advanced** | |
-| **OS** | |
-| **Memory** | |
-| **Processor** | |
-| **Cloud Platform** | |
-| **CPU(s)** | |
-| **Core(s) per socket** | |
-| **Socket(s)** | |
-| **Storage** | |
-| **(Partner Product Name)** | |
-
-
-| | |
-| ----------- | ----------- |
-| **Certification Test Date**| |
-| **BDR-Always-ON**| |
-| **OS**| |
-| **Cloud Platform**| |
-| **Deployment Tool**| |
diff --git a/advocacy_docs/partner_docs/GuideTemplate/Images/EDBPartnerProgram.png b/advocacy_docs/partner_docs/GuideTemplate/Images/EDBPartnerProgram.png
deleted file mode 100644
index 561e41d0203..00000000000
--- a/advocacy_docs/partner_docs/GuideTemplate/Images/EDBPartnerProgram.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:74e7165b91030cd99c07756c267aa7593e6b3690fc2c10316b30420081b4def5
-size 57611
diff --git a/advocacy_docs/partner_docs/GuideTemplate/Images/PlaceholderImage.png b/advocacy_docs/partner_docs/GuideTemplate/Images/PlaceholderImage.png
deleted file mode 100644
index ed0f5d23f07..00000000000
--- a/advocacy_docs/partner_docs/GuideTemplate/Images/PlaceholderImage.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:80afea08e91f596792ceabb2abb7811fa22a4e2497532525ec4deff9cf27180d
-size 1182
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/LiquibaseGuide/02-PartnerInformation.mdx
new file mode 100644
index 00000000000..9a69f231cc1
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/02-PartnerInformation.mdx
@@ -0,0 +1,12 @@
+---
+title: 'Partner Information'
+description: 'Overview of Liquibase and the Liquibase Pro product'
+
+---
+| | |
+| ----------- | ----------- |
+| **Partner Name** | Liquibase |
+| **Partner Product** | Liquibase Pro |
+| **Web Site** | https://www.liquibase.com |
+| **Version & Platform** | Liquibase Pro 4.3.3: CentOS7 |
+| **Product Description** | Liquibase is a database-independent library for tracking, managing and applying database schema changes. |
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/03-SolutionSummary.mdx b/advocacy_docs/partner_docs/LiquibaseGuide/03-SolutionSummary.mdx
new file mode 100644
index 00000000000..093fb61a2bf
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/03-SolutionSummary.mdx
@@ -0,0 +1,11 @@
+---
+title: 'Solution Summary'
+description: 'Brief explanation of the solution and its purpose'
+---
+Easily track, version, and deploy EDB Postgres Advanced schema changes with Liquibase. Liquibase enables your team to deploy safer, faster, automated database releases across all your environments. Liquibase integrates with most application build and deployment tools to help track, version, and deploy EDB Postgres Advanced database changes.
+
+The desired changes are applied on EDB Postgres Advanced using Liquibase changesets. The details of the changes can be stored on the Liquibase Hub to provide analysis of the changes.
+
+
+
+
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/04-Configuration.mdx b/advocacy_docs/partner_docs/LiquibaseGuide/04-Configuration.mdx
new file mode 100644
index 00000000000..603d3e1ab87
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/04-Configuration.mdx
@@ -0,0 +1,41 @@
+---
+title: 'Configuring the Integration'
+description: 'Walkthrough of configuring the Liquibase Pro integration'
+---
+
+Implementing Liquibase with EDB Postgres Advanced requires the following components:
+
+- EDB Postgres Advanced
+- Liquibase software
+
+## Prerequisites
+
+- A running EDB Postgres Advanced (EPAS) instance
+- A user account on Liquibase Hub
+- Liquibase Pro (CLI) installed
+
+## Configure Liquibase for EDB Postgres Advanced
+
+1. Download the PostgreSQL JDBC driver (postgresql JAR) from https://jdbc.postgresql.org/download.html.
+2. Move the postgresql JAR to the Liquibase Pro directory.
+3. Log in to your Liquibase Hub account and select the **Settings** icon on the left side of the page to access the API key.
+4. Copy the API key, which connects the information generated by your changelogs and other operations to your Liquibase Hub projects.
+
+
+
+
+
+
+5. Create a liquibase.properties file in the Liquibase Pro directory to contain:
+
+ - Driver class path
+
+ - Database connection string
+
+ - User authentication information for the database you want to capture
+
+ - Liquibase Pro license key
+
+ - Liquibase Hub API key
+
+See a sample [liquibase.properties](07-Appendix.mdx) file in the appendix for configuring Liquibase for EDB Postgres Advanced.
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/05-IntegrationViews.mdx b/advocacy_docs/partner_docs/LiquibaseGuide/05-IntegrationViews.mdx
new file mode 100644
index 00000000000..a8328f8f197
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/05-IntegrationViews.mdx
@@ -0,0 +1,159 @@
+---
+title: 'Using Liquibase'
+description: 'Walkthroughs of Liquibase usage scenarios'
+---
+
+Liquibase is a development tool that allows changes to be applied to the EDB database using the Liquibase CLI and viewed on the Liquibase Hub.
+
+### Creating a Project on Liquibase Hub
+
+Use the following steps to create a project for the target database instance on the Liquibase Hub. All data related to the target database is stored in this project.
+
+1. Log in to the Liquibase Hub console.
+
+2. Select the **Projects** menu.
+
+3. Create a Project on the Liquibase Hub by selecting **Create Project**.
+
+4. On the **Create Project** page, enter the name and description of the project.
+
+5. Select **Create Project**.
+
+
+
+
+
+### Applying Database Changes
+
+This section provides examples of applying database changes using Liquibase changesets, including:
+
+- [Updating tables](#updating-tables)
+
+- [Rolling back changes](#rolling-back-changes)
+
+- [Viewing database changes on Liquibase Hub](#viewing-database-changes-on-liquibase-hub)
+
+
+Refer to the [Liquibase documentation](https://docs.liquibase.com/change-types/home.html) for available change types.
+
+!!! Note
+ All Liquibase commands in the examples are executed from the directory where Liquibase Pro is installed.
+
+The initial database objects and data for these examples are created using this sample script:
+
+```sql
+CREATE SEQUENCE sal_seq MINVALUE 1 START WITH 1 INCREMENT BY 1 NOCACHE;
+
+CREATE TABLE tp_sales_db (
+salesman_id INT4,
+salesman_name VARCHAR2(30),
+sales_region VARCHAR2(30),
+sales_amount INT4 DEFAULT sal_seq.nextval,
+deptno INT4
+);
+
+CREATE TABLE tp_department_db
+(
+deptno INT4 Primary Key,
+dname VARCHAR(50),
+location VARCHAR(100)
+);
+INSERT INTO tp_sales_db VALUES (100,'Person 1','CITY 1',DEFAULT,10);
+INSERT INTO tp_department_db VALUES (10,'Development','Pakistan');
+```
+
+#### Updating Tables
+1. Create a changelog file using one of the following options for creating a changelog file:
+
+ - Create the file manually. See the following changelog file example. For detailed information on changelogs, refer to the [changelog documentation](https://docs.liquibase.com/concepts/basic/xml-format.html) from Liquibase.
+
+    ```xml
+    <?xml version="1.0" encoding="UTF-8"?>
+    <databaseChangeLog
+        xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
+        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+        xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog
+          http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-4.3.xsd">
+
+      <changeSet id="1" author="edb">
+        <!-- Forward changes: add two departments, update and clean up existing rows -->
+        <insert tableName="tp_department_db">
+          <column name="deptno" valueNumeric="20"/>
+          <column name="dname" value="Sales"/>
+          <column name="location" value="UK"/>
+        </insert>
+        <insert tableName="tp_department_db">
+          <column name="deptno" valueNumeric="30"/>
+          <column name="dname" value="Support"/>
+          <column name="location" value="USA"/>
+        </insert>
+        <update tableName="tp_department_db">
+          <column name="location" value="UK"/>
+          <where>deptno=30</where>
+        </update>
+        <delete tableName="tp_department_db">
+          <where>deptno=10</where>
+        </delete>
+        <!-- Custom rollback restores the initial content of the table -->
+        <rollback>
+          DELETE FROM public.tp_department_db WHERE deptno=20;
+          DELETE FROM public.tp_department_db WHERE deptno=30;
+          INSERT INTO public.tp_department_db (deptno, dname, location) VALUES ('10',
+          'Development', 'Pakistan');
+        </rollback>
+      </changeSet>
+    </databaseChangeLog>
+    ```
+ - Generate a sample file and update it with your changes. Generate the sample changelog file using this command:
+
+ `./liquibase --changeLogFile=edb_dbchangelog.xml generateChangeLog`
+
+!!! Note
+    Before generating the changelog, Liquibase takes a snapshot of the database, which is an essential step in the process.
+
+2. For each database change, add a changeset entry to the changelog file. For detailed information on changesets, refer to the [changeset documentation](https://docs.liquibase.com/concepts/basic/changeset.html) from Liquibase.
+
+3. To register the changelog with the Liquibase Hub and provide the project name, execute this command:
+
+ `./liquibase registerChangeLog`
+
+Sample output:
+
+
+
+
+
+4. To update the table in the target database, use this command:
+
+ `./liquibase update`
+
+#### Rolling Back Changes
+
+Roll back changes made to the table using the `rollbackCount` command. For example, this command uses the sample changelog file:
+
+`./liquibase rollbackCount 1`
+
+#### Viewing Database Changes On Liquibase Hub
+
+To view the details of the changes made on the target database, select a ChangeLog link. For example, select `dbchangelog.xml` to see its details.
+
+
+
+
+
+The diagram below shows the details for the selected changeset.
+
+
+
+
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/LiquibaseGuide/06-CertificationEnvironment.mdx
new file mode 100644
index 00000000000..b7162b4ff6e
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/06-CertificationEnvironment.mdx
@@ -0,0 +1,18 @@
+---
+title: 'Certification Environment'
+description: 'Overview of the certification environment used in the implementation of Liquibase Pro'
+---
+
+| | |
+| ----------- | ----------- |
+| **Certification Test Date** | June 7, 2021 |
+| **EDB Postgres Advanced** | 12, 13 |
+| **OS** | CentOS 7 |
+| **Memory** | 2GB |
+| **Processor** | Intel® Xeon® Processor SP Family (“Skylake”) |
+| **Cloud Platform** | OpenStack |
+| **CPU(s)** | 1 |
+| **Core(s) per socket** | 1 |
+| **Socket(s)** | 1 |
+| **Storage** | 80 GB |
+| **Liquibase Pro** | 4.3.3 |
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/07-Appendix.mdx b/advocacy_docs/partner_docs/LiquibaseGuide/07-Appendix.mdx
new file mode 100644
index 00000000000..437e6ce02cc
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/07-Appendix.mdx
@@ -0,0 +1,25 @@
+---
+title: 'Appendix'
+description: 'Sample properties file'
+---
+
+Sample `liquibase.properties` file:
+
+```properties
+changeLogFile: dbchangelog.xml
+url: jdbc:postgresql://localhost:5444/edb
+username: enterprisedb
+password:
+classpath: postgresql-42.2.19.jar
+liquibaseProLicenseKey:
+ABwwGgQUWXKThTXmiJzs0Uhagk5wFFn9TooCAgQAGR6jLSVLG/DVclZ0X+qZ6jAofJeDn4kpOzG1oSeJ
+WRF8H+KmSa5cflPe+aTxEmVzvO1DrsDnsPpRHfkF7uBHBxS9eGQg1SU3NZh8reXguOelVMEO7ry2ar2g
+8d6B76dPHdTBGUrd6kJPEg6vipWjl0UybmXKw/L7bdEHdkw6WPTf0XAUYfvH1IBoJnO40M3EovGP0BIZ
+uPZsnPLrB6HX1f620A5i7I69EXn4Q4/lZTNRMearRjOMX2EwohrFSECPVerWOvD371iiHlrTH+uBG7/Z
+kLRBZ++zTntSTmTOB5Fdtqec3E87meIX0kXc3jSD76tXTX4aG5WPklEqIsmmdvE+w+R5fd5GhIuiaVoM
+vl6Yyhtlb9V2YQk1oJQFsBm/KlV88WdwKB2BEHST7z5bNOpja2UIwEKKHYnRayhAvfKzf3ciZ7b1Sp/8
+rGGOvMFlHkm7LP7GISM0MLVUSAvVaH3q4IK/SYkHpNxPRAc8T4qptd4UxBGinNyORKNWtyOHpIWu6wAm
+OhGSpp7Nkglj1fpjzZs+U2fqFxGA6NffNB7ysJDzNQr5ePTmVyQJgb6FkQ4LG+lw1J8CmNkK7SG4BXne
+bM4ruyYYjTSVMKy4OiLwEhBjplZsSO5AGdpgQPP5ognnauuXgqtnq6uxr8simg==
+liquibase.hub.ApiKey: 8KhNHK_IOkVfBDpHkOKbRrHrlxNNkhYOdw55ZbQ1nxQ
+```
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/Images/Configuration.png b/advocacy_docs/partner_docs/LiquibaseGuide/Images/Configuration.png
new file mode 100644
index 00000000000..f7ea7282844
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/Images/Configuration.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84d8b2fb74f649e91f9ad82954db7b61a955482505048dfadfa9458fff40ebaa
+size 174609
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/Images/ConfigurationPrerequisites1.png b/advocacy_docs/partner_docs/LiquibaseGuide/Images/ConfigurationPrerequisites1.png
new file mode 100644
index 00000000000..2cecc702dcb
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/Images/ConfigurationPrerequisites1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b7ff44294323688df29ab9a92d6b6348c83e3d6c1d78172f45f1c657361cdb5
+size 45103
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews1.png b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews1.png
new file mode 100644
index 00000000000..c1e7e8b4164
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7ced32cf17e8eb3c00350a9d4d389b8502c1d65583287f439f7656e459ed1ea
+size 54813
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews2.png b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews2.png
new file mode 100644
index 00000000000..df457df5c1b
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18c83f833bb38cd3bc57a52961debfc7ac472533afa28211a72126e43f88a929
+size 45075
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews3.png b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews3.png
new file mode 100644
index 00000000000..411ceca650a
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews3.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c796a4c5403d18a2e457f71f8bdd427d8d36dcc49171830f7d70c45965407a5f
+size 485371
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews4.png b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews4.png
new file mode 100644
index 00000000000..a921112c2ae
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews4.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2ba96f2d892898d7b0fac2c061fe961fc9e5c6532d8974632a0861e9a788be4
+size 128966
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews5.png b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews5.png
new file mode 100644
index 00000000000..851c8727dfa
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/Images/IntegrationViews5.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:566a42afdf0440353b622ba8051c1450e3aec3633f1d5b709694ba0e5560111c
+size 85852
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/Images/PartnerProgram.jpg.png b/advocacy_docs/partner_docs/LiquibaseGuide/Images/PartnerProgram.jpg.png
new file mode 100644
index 00000000000..a51f268a007
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/Images/PartnerProgram.jpg.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dddb2403778294d50b9c500a3b961fc5ed0aa764d4c425cd44c1c90193915e5
+size 9855
diff --git a/advocacy_docs/partner_docs/LiquibaseGuide/index.mdx b/advocacy_docs/partner_docs/LiquibaseGuide/index.mdx
new file mode 100644
index 00000000000..aea927de318
--- /dev/null
+++ b/advocacy_docs/partner_docs/LiquibaseGuide/index.mdx
@@ -0,0 +1,12 @@
+---
+title: 'Liquibase Implementation Guide'
+indexCards: simple
+directoryDefaults:
+ iconName: handshake
+---
+
+
+
+
+EDB GlobalConnect Technology Partner Implementation Guide
+Liquibase
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/NutanixGuide/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/NutanixGuide/02-PartnerInformation.mdx
new file mode 100644
index 00000000000..2e8469463e9
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/02-PartnerInformation.mdx
@@ -0,0 +1,15 @@
+---
+title: 'Partner Information'
+description: 'Overview of Nutanix and the Nutanix AHV product'
+
+---
+
+| | |
+| ----------- | ----------- |
+| **Partner Name** | Nutanix |
+| **Partner Product** | Nutanix AHV |
+| **Web Site** | [www.nutanix.com](https://www.nutanix.com) |
+| **Version & Platform** | 20170830.453, Available (Guest OS) platforms: Ubuntu, CentOS |
+| **Product Description** | AHV is the native Nutanix hypervisor that offers virtualization capabilities needed to deploy and manage enterprise applications. |
diff --git a/advocacy_docs/partner_docs/NutanixGuide/03-SolutionSummary.mdx b/advocacy_docs/partner_docs/NutanixGuide/03-SolutionSummary.mdx
new file mode 100644
index 00000000000..244d1ca0a4e
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/03-SolutionSummary.mdx
@@ -0,0 +1,17 @@
+---
+title: 'Solution Summary'
+description: 'Brief explanation of the solution and its purpose'
+---
+
+EDB Postgres Advanced, EDB Failover Manager (EFM), Postgres Enterprise Manager (PEM), and Backup and
+Recovery Manager (Barman) can each be deployed on virtual machines created via the native Nutanix hypervisor,
+AHV, or VMware ESXi. AHV represents a unique approach to virtualization that offers powerful capabilities needed
+to deploy and manage enterprise applications. AHV complements the value of hyperconverged infrastructure (HCI)
+by integrating native virtualization along with networking, infrastructure, and operations management within
+a single intuitive interface: Nutanix Prism.
+
+The following diagram shows a high-level architecture of the Nutanix platform:
+
+
+
+
diff --git a/advocacy_docs/partner_docs/NutanixGuide/04-Configuration.mdx b/advocacy_docs/partner_docs/NutanixGuide/04-Configuration.mdx
new file mode 100644
index 00000000000..bbbb77741d6
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/04-Configuration.mdx
@@ -0,0 +1,63 @@
+---
+title: 'Configuring the Integration'
+description: 'Walkthrough of configuring the AHV integration '
+---
+
+Implementing EDB software on Nutanix AHV requires the following components:
+
+- EDB Postgres Advanced
+- Nutanix software
+
+Sample deployment:
+
+
+
+
+
+## Prerequisites
+
+- A running Nutanix cluster with AHV.
+- Access to the Prism web console.
+
+
+
+
+
+ For more details, read the [Prism Central Guide](https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-Prism-v5_19:Prism-Central-Guide-Prism-v5_19).
+
+## Deploying VMs Using AHV
+
+To create a Virtual Machine (VM) via AHV:
+
+
+1. On Prism Central, select **Create VM**. Watch this [video](https://www.youtube.com/watch?v=q4wBewXfDs8) from Nutanix for more information.
+
+
+
+
+
+2. Enter the appropriate values for your configuration. For example, these are the specifications for a test environment:
+ ```
+ vCPU(s): 2
+ Memory: 4 GiB
+ Storage: DISK 50 GiB
+ Guest OS: CentOS7
+ ```
+
+
+!!! Note
+    Mount the CD-ROM with a CentOS 7 ISO made available via the Image Service.
+
+
+
+
+
+3. Select **Save**.
+
+4. Install EDB Postgres Advanced. Refer to the [EDB Postgres Advanced documentation](https://www.enterprisedb.com/docs/epas/latest/).
+
+5. Install the other EDB tools needed for your configuration in the appropriate VMs. Refer to the [EDB documentation](https://www.enterprisedb.com/docs).
+
+
+
+
diff --git a/advocacy_docs/partner_docs/NutanixGuide/05-IntegrationViews.mdx b/advocacy_docs/partner_docs/NutanixGuide/05-IntegrationViews.mdx
new file mode 100644
index 00000000000..f0c08a8e41f
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/05-IntegrationViews.mdx
@@ -0,0 +1,25 @@
+---
+title: 'Using the Integration'
+description: 'Overview of the integration'
+---
+
+Nutanix AHV hosts the virtual machines you create so that you can deploy or redeploy them as needed.
+
+To use Nutanix AHV:
+
+1. Log in to Prism.
+
+1. Go to the **Table** tab where you can view the virtual machines deployed via Nutanix AHV. For example, this screenshot shows VMs that host the following EDB products:
+
+ - EDB Postgres Advanced (EPAS)
+ - EDB Failover Manager (EFM)
+ - Postgres Enterprise Manager (PEM)
+   - Backup and Recovery Manager (Barman)
+
+
+
+
+
+
+!!! note
+    The screenshot reflects our test environment and is not representative of a production environment.
diff --git a/advocacy_docs/partner_docs/NutanixGuide/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/NutanixGuide/06-CertificationEnvironment.mdx
new file mode 100644
index 00000000000..e8aa5eda0d6
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/06-CertificationEnvironment.mdx
@@ -0,0 +1,27 @@
+---
+title: 'Certification Environment'
+description: 'Overview of the certification environment used in the implementation of AHV'
+---
+
+## Hypervisor (AHV) 20170830.453 Test Environment
+| | |
+| ----------- | ----------- |
+| **Certification Test Date** | April 10, 2021 |
+| **EDB Postgres Advanced** | 10, 11, 12, 13 |
+| **Postgres Enterprise Manager** | 8.0 |
+| **EDB Failover Manager** | 4.1 |
+| **Memory** | 8 GB |
+| **CPU(s)** | 2 |
+| **Platform** | Nutanix |
+
+
+## VMware ESXi 6.7.0.40000 Test Environment
+| | |
+| ----------- | ----------- |
+| **Certification Test Date** | April 10, 2021 |
+| **EDB Postgres Advanced** | 10, 11, 12, 13 |
+| **Postgres Enterprise Manager** | 8.0 |
+| **EDB Failover Manager** | 4.1 |
+| **Memory** | 8 GB |
+| **CPU(s)** | 2 |
+| **Platform** | Nutanix |
diff --git a/advocacy_docs/partner_docs/NutanixGuide/07-Appendix.mdx b/advocacy_docs/partner_docs/NutanixGuide/07-Appendix.mdx
new file mode 100644
index 00000000000..4df76b51124
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/07-Appendix.mdx
@@ -0,0 +1,9 @@
+---
+title: 'Appendix'
+description: 'Nutanix documentation'
+---
+
+For all things Nutanix, refer to the [Nutanix Bible](https://www.nutanixbible.com/).
+
+!!! note
+    Testing of EDB products was also performed on VMs deployed using the VMware ESXi hypervisor on the Nutanix platform.
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/NutanixGuide/Images/CreateaVM.png b/advocacy_docs/partner_docs/NutanixGuide/Images/CreateaVM.png
new file mode 100644
index 00000000000..b997d9e8b53
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/Images/CreateaVM.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a28128e1e8c8f29351d361f4ee63e202fa2b3c8c027884bc7e0e7ab7f43023c
+size 269890
diff --git a/advocacy_docs/partner_docs/NutanixGuide/Images/EDBProducts.png b/advocacy_docs/partner_docs/NutanixGuide/Images/EDBProducts.png
new file mode 100644
index 00000000000..da723066731
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/Images/EDBProducts.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c803551c0637700f0851e1c656b28f1086b8f415fa9b4f466a5aafd76b6796b
+size 238950
diff --git a/advocacy_docs/partner_docs/NutanixGuide/Images/IntegrationViews.png b/advocacy_docs/partner_docs/NutanixGuide/Images/IntegrationViews.png
new file mode 100644
index 00000000000..c3581b71eaf
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/Images/IntegrationViews.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38b8457525469bfa685df3bb3c66c922989987ecf7d45074fe4bb0355a71fbb7
+size 351636
diff --git a/advocacy_docs/partner_docs/NutanixGuide/Images/NutanixSolutionSummary.png b/advocacy_docs/partner_docs/NutanixGuide/Images/NutanixSolutionSummary.png
new file mode 100644
index 00000000000..fe0fa592059
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/Images/NutanixSolutionSummary.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8211910fb8f3802dbbe16347a7f4e618ec8c8215636748d1c7c5ba33debe5a6
+size 187297
diff --git a/advocacy_docs/partner_docs/NutanixGuide/Images/PartnerProgram.jpg.png b/advocacy_docs/partner_docs/NutanixGuide/Images/PartnerProgram.jpg.png
new file mode 100644
index 00000000000..93e0514710b
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/Images/PartnerProgram.jpg.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1953f3a5526ab37279a598f1c370c5acbf9f6d18f7902cb538161182fbed3b1f
+size 57295
diff --git a/advocacy_docs/partner_docs/NutanixGuide/Images/PrismWebConsole.png b/advocacy_docs/partner_docs/NutanixGuide/Images/PrismWebConsole.png
new file mode 100644
index 00000000000..789f469cf7c
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/Images/PrismWebConsole.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe29edcc9dbbe333a46cb421392f8036d42b82c90972d10d5f91f186e077702c
+size 352860
diff --git a/advocacy_docs/partner_docs/NutanixGuide/Images/TestEnvironmentSpecifications.png b/advocacy_docs/partner_docs/NutanixGuide/Images/TestEnvironmentSpecifications.png
new file mode 100644
index 00000000000..5e4a462f46a
--- /dev/null
+++ b/advocacy_docs/partner_docs/NutanixGuide/Images/TestEnvironmentSpecifications.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2229d18af1ab06530d06a8e9c1c1d34a426695287c82a92b5e84e963ed436e
+size 166279
diff --git a/advocacy_docs/partner_docs/GuideTemplate/index.mdx b/advocacy_docs/partner_docs/NutanixGuide/index.mdx
similarity index 53%
rename from advocacy_docs/partner_docs/GuideTemplate/index.mdx
rename to advocacy_docs/partner_docs/NutanixGuide/index.mdx
index 12c031fdd91..17e534dfb1b 100644
--- a/advocacy_docs/partner_docs/GuideTemplate/index.mdx
+++ b/advocacy_docs/partner_docs/NutanixGuide/index.mdx
@@ -1,12 +1,12 @@
---
-title: '(Insert Company Name Here) Implementation Guide'
+title: 'Nutanix Implementation Guide'
indexCards: simple
directoryDefaults:
iconName: handshake
---
-
+
EDB GlobalConnect Technology Partner Implementation Guide
-(Insert Product Name Here)
\ No newline at end of file
+Nutanix AHV
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/RepostorGuide/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/RepostorGuide/02-PartnerInformation.mdx
new file mode 100644
index 00000000000..e6074c70fc8
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/02-PartnerInformation.mdx
@@ -0,0 +1,14 @@
+---
+title: 'Partner Information'
+description: 'Overview of Repostor and the Data Protector for PostgreSQL product'
+
+---
+
+| | |
+| ----------- | ----------- |
+| **Partner Name** | Repostor |
+| **Partner Product** | Data Protector for PostgreSQL |
+| **Web Site** | [www.repostor.com](https://www.repostor.com) |
+| **Version & Platform** | 5.0.0.0-96 ([downloads](https://www.repostor.com/downloadpage/), [product details](https://www.repostor.com/products/#data)) |
+| **Product Description** | Data Protector allows you to do fast, hot online backups in conjunction with IBM Spectrum Protect. After a full backup has been taken, incremental backups capture subsequent changes to the database. |
+| **Date** | April 2, 2021 |
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/RepostorGuide/03-SolutionSummary.mdx b/advocacy_docs/partner_docs/RepostorGuide/03-SolutionSummary.mdx
new file mode 100644
index 00000000000..fe2ad7e95c4
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/03-SolutionSummary.mdx
@@ -0,0 +1,18 @@
+---
+title: 'Solution Summary'
+description: 'Brief explanation of the solution and its purpose'
+---
+
+Repostor Data Protector for PostgreSQL (RDP) is an integration between EDB Postgres Advanced and IBM Spectrum
+Protect. It enables backup and restore to Spectrum Protect and archives WAL logs to IBM Spectrum Protect as
+soon as they are produced. It also enables database administrators to perform backup and restore without
+needing Spectrum Protect knowledge. The current version of RDP uses high-level calls to standard PostgreSQL
+tools such as psql, pg_dump, pg_restore, pg_start_backup, and pg_stop_backup.
+
+RDP supports backup at two levels: database level and instance level. Backup at the database level uses the pg_dump tool.
+
+Backup at the instance level uses a file backup of the `data_directory` and any external tablespace locations. This requires WAL archiving to be active and `archive_command` to be set to run the RDP logwriter tool.
+
+
+
+
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/RepostorGuide/04-Configuration.mdx b/advocacy_docs/partner_docs/RepostorGuide/04-Configuration.mdx
new file mode 100644
index 00000000000..90a0d172dfd
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/04-Configuration.mdx
@@ -0,0 +1,165 @@
+---
+title: 'Configuring the Integration'
+description: 'Walkthrough of configuring the Data Protector for PostgreSQL integration'
+---
+
+## Prerequisites
+
+- The database host with the EDB Postgres Advanced environment needs the following components:
+
+  - An IBM-supported operating system for Spectrum Protect clients.
+
+ - Spectrum Protect BA client (used for regular file backup).
+
+ - Spectrum Protect API client.
+
+  - EDB Postgres Advanced version 9.6 or later.
+
+ - Repostor Data Protector client.
+
+
+- A PostgreSQL user needs to be defined for use with the RDP.
+
+ - This user needs to be able to connect to EDB Postgres Advanced and have sufficient permissions for
+database backup and restore.
+
+ - This user needs access to local Spectrum Protect files to read configuration files and to write to log files.
+
+ !!! Note
+    If the user is not the same user that owns the PostgreSQL server process, make sure that the server process owner has the correct Spectrum Protect client file access so they can execute `archive_command`.
+
+- A Spectrum Protect node needs to be defined in the Spectrum Protect server environment in a
+management class that suits the needs of the DBA team.
+
+- The Spectrum Protect client details for setting up the connection to the Spectrum Protect server, including
+the client's Spectrum Protect password, need to be available.
+
+
+## Installation and configuration of Repostor Data Protector for PostgreSQL
+
+The high-level steps for installing and configuring the integration are:
+
+1. [Configure the local Spectrum Protect configuration files.](#configure-the-local-spectrum-protect-configuration-files)
+1. [Install the Repostor Data Protector client.](#install-the-repostor-data-protector-client)
+1. [Verify the connection to PostgreSQL psql.](#verify-the-connection-to-postgresql-psql)
+1. [Configure PostgreSQL `archive_command` to use logwriter.](#configure-postgresql-archive_command-to-use-logwriter)
+1. [Set up a backup script.](#set-up-a-backup-script)
+
+### Configure the Local Spectrum Protect Configuration Files
+
+Configure the local Spectrum Protect configuration files with the details needed to connect to the
+Spectrum Protect server:
+
+1. Create a Spectrum Protect options file with the logical servername.
+
+2. Edit the `dsm.sys` file by adding a section for the new logical servername for this Spectrum Protect node with
+connection details.
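+
+    For example, a `dsm.sys` stanza for a logical servername `postgres` might look like this (the server address, port, and node name are illustrative only; substitute your environment's values):
+
+    ```
+    SERVERNAME        postgres
+    COMMMETHOD        TCPIP
+    TCPSERVERADDRESS  tsm.example.com
+    TCPPORT           1500
+    NODENAME          pgnode01
+    PASSWORDACCESS    GENERATE
+    ```
+
+    The matching options file (here, `dsm.postgres.opt`) then needs only a `SERVERNAME postgres` line pointing at this stanza.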
+
+3. Create soft links for `dsm.sys` and `dsm.opt` from the API directory to the BA client's `bin` directory. This example uses `dsm.postgres.opt` as the Spectrum Protect options filename:
+
+    ```
+    ln -s /opt/tivoli/tsm/client/ba/bin/dsm.sys /opt/tivoli/tsm/client/api/bin64/dsm.sys
+    ln -s /opt/tivoli/tsm/client/ba/bin/dsm.postgres.opt /opt/tivoli/tsm/client/api/bin64/dsm.opt
+    ```
+
+    Here is the listing (via the `ls -l` command) from the API directory for this example, showing the links:
+
+ ```
+ dsm.opt -> /opt/tivoli/tsm/client/ba/bin/dsm.postgres.opt
+ dsm.sys -> /opt/tivoli/tsm/client/ba/bin/dsm.sys
+ ```
+
+4. Set the `DSMI_CONFIG` variable for the OS user that runs the RDP tools. This is preferably the same user
+that owns the PostgreSQL server process.
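+
+    For example (this path matches the options file used elsewhere in this guide; adjust it for your installation):
+
+    ```
+    export DSMI_CONFIG=/opt/tivoli/tsm/client/ba/bin/dsm.postgres.opt
+    ```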
+
+5. Verify that the OS user can connect to the Spectrum Protect server. This also verifies the correct file access to the local Spectrum Protect files.
+
+ ```
+ dsmc q session -se=XXX
+ ```
+
+    where `XXX` is your logical servername.
+
+
+### Install the Repostor Data Protector Client
+
+1. Verify that IBM Spectrum Protect clients for API and BA are already installed. For example:
+
+ ```
+ rpm -aq | grep TIV
+ TIVsm-API64-8.1.8-0.x86_64
+ TIVsm-BA-8.1.8-0.x86_64
+ ```
+
+2. Install RDP (verify that you have the latest version from [www.repostor.com](https://www.repostor.com)):
+    ```
+    rpm -ivh rdp4Postgres-4.4.4.0-31.x86_64.rpm
+    ```
+ If you are on Ubuntu, you need to prepare the package with the `alien` tool:
+
+ ```
+ sudo apt-get install alien
+ sudo alien rdp4Postgres-4.4.4.0-31.x86_64.rpm
+    sudo dpkg -i rdp4Postgres-4.4.4.0-31.deb
+ ```
+
+
+3. Install the license file. Place the `license.dat` file in the Repostor `/opt/repostor/rdp4Postgres/etc` directory. If no
+license is available yet, a trial license is generated automatically the first time a backup is run. This
+trial license needs to be cleared with a special `UNLOCK` key before changing to a contract license.
+
+4. Add the Repostor `bin` directory, `/opt/repostor/rdp4Postgres/bin`, to the `PATH` of all users that run RDP commands.
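+
+    For example, in the shell profile of each user that runs RDP commands:
+
+    ```
+    export PATH=$PATH:/opt/repostor/rdp4Postgres/bin
+    ```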
+
+### Verify the Connection to PostgreSQL psql
+
+The user specified with the `-u` option to the RDP commands needs to be able to connect to PostgreSQL and be
+allowed to back up and restore databases.
+
+For example, to verify the connection for user `enterprisedb`:
+
+```
+ psql -U enterprisedb -l
+```
+
+### Configure PostgreSQL `archive_command` to Use logwriter
+
+If your PostgreSQL environment has WAL archiving activated and you plan to back up PostgreSQL at the instance
+level (the `-f` option with RDP), then you need to configure the PostgreSQL `archive_command` to run the
+RDP `logwriter.script`.
+
+For example, this is a sample specification of the `archive_command` in the `postgresql.conf` file:
+```
+archive_command = '/opt/repostor/rdp4Postgres/bin/logwriter.script -v -S instName -s "%p" -d %f'
+```
+!!! note
+ The instance name you specify with the `-S` option needs to be the same as the one you use with the RDP `postgresbackup`
+ command. The `logwriter.script` is a script that calls the logwriter binary. It is a script to allow for local configuration if you want
+ to set a specific environment before running the logwriter.
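+
+For example, an instance-level backup that names the same instance as the `archive_command` above might look like this (a minimal sketch; `instName` is illustrative, and the remaining flags follow the backup script in the next section):
+
+```
+postgresbackup -u enterprisedb -S instName -f
+```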
+
+### Set up a Backup Script
+
+If you run `postgresbackup` from a script that is initiated by the Spectrum Protect scheduler, you should set `LOGNAME`.
+
+Note that the paths and filenames in this example are unique to each installation:
+
+```
+ #!/bin/bash
+ #
+
+ ### Set PATH & DSMI_CONFIG
+ export PATH=$PATH:/opt/repostor/rdp4Postgres/bin
+ export DSMI_CONFIG=/opt/tivoli/tsm/client/ba/bin/dsm.postgres.opt
+
+ ### Set LOGNAME
+ export LOGNAME=enterprisedb
+
+ ### Run postgresbackup command, send output to logfile under /tmp
+ postgresbackup -u enterprisedb -f -z -v >/tmp/postgresbackup.log 2>&1
+```
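+
+Before handing the script to the Spectrum Protect scheduler, you might run it once manually to verify the setup (the script path here is illustrative):
+
+```
+chmod +x /usr/local/bin/postgres_backup.sh
+/usr/local/bin/postgres_backup.sh
+tail /tmp/postgresbackup.log
+```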
+
+
+
diff --git a/advocacy_docs/partner_docs/RepostorGuide/04a-Using.mdx b/advocacy_docs/partner_docs/RepostorGuide/04a-Using.mdx
new file mode 100644
index 00000000000..1746a964e1a
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/04a-Using.mdx
@@ -0,0 +1,55 @@
+---
+title: 'Using the Integration'
+description: 'Overview of the integration'
+---
+The current RDP version is a command-line client. The user runs three commands
+(`postgresbackup`, `postgresquery`, and `postgresrestore`), while the logwriter and logreader tools are
+called automatically by PostgreSQL during execution of `archive_command` and `restore_command`.
+
+The following screenshots are examples of daily operation tasks. For details and more examples, see chapter 5 in the *RDP User Guide*.
+
+
+
+## Instance Level Backup Example
+
+
+
+
+
+## Query of Available Instance Backups on the Spectrum Protect Server Example
+
+
+
+
+
+## Instance Restore Example
+
+
+
+
+
+## Database Level Backup Example
+
+
+
+
+
+## Query of Available Backups on the Spectrum Protect Server Example
+
+
+
+
+
+## Restore of Database Level Backup Example
+
+In this example, the database is dropped first only to make the restore visible.
+
+
+
+
+
+## Redirected Restore of Database Level Backup Example
+
+
+
+
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/RepostorGuide/05-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/RepostorGuide/05-CertificationEnvironment.mdx
new file mode 100644
index 00000000000..1ec514f760d
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/05-CertificationEnvironment.mdx
@@ -0,0 +1,14 @@
+---
+title: 'Certification Environment'
+description: 'Overview of the certification environment used in the implementation of the Repostor Data Protector for PostgreSQL product'
+---
+
+| | |
+| ----------- | ----------- |
+| **Certification Test Date**| April 6, 2021 |
+| **OS** | CentOS Linux 7.9.2009 |
+| **IBM Tivoli Storage Manager client API**| TIVsm-API64-8.1.8-0.x86_64 |
+| **IBM Tivoli Storage Manager client BA**| TIVsm-BA-8.1.8-0.x86_64 |
+| **IBM Tivoli Storage Manager Server**| 8.1.8.000 |
+| **Repostor Data Protector for PostgreSQL**| 5.0.0.0-96 |
+| **EDB Postgres Advanced** | 13 |
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/AvailableBackups.png b/advocacy_docs/partner_docs/RepostorGuide/Images/AvailableBackups.png
new file mode 100644
index 00000000000..3ccefb86bf0
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/AvailableBackups.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af6a615590d07eb1ab5c15924519630ee43dfa8deadea8abdf06f467528a9402
+size 152561
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/DatabaseLevelBackup.png b/advocacy_docs/partner_docs/RepostorGuide/Images/DatabaseLevelBackup.png
new file mode 100644
index 00000000000..fffa643f480
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/DatabaseLevelBackup.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:982840e4c5eef22e30b8e66482a414e9173f6c93c4247f10e6da138de239321e
+size 307709
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/InstanceLevelBackup.png b/advocacy_docs/partner_docs/RepostorGuide/Images/InstanceLevelBackup.png
new file mode 100644
index 00000000000..7f7cb00e015
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/InstanceLevelBackup.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:139ae787cb9090f2bb85b19c1b624847692639d967ad0f660d33ab31fe5595f6
+size 418744
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/InstanceRestore.png b/advocacy_docs/partner_docs/RepostorGuide/Images/InstanceRestore.png
new file mode 100644
index 00000000000..f671e923560
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/InstanceRestore.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d640e15d05119875a79a60b1ce1fa646f2269eebaeea8347ee96e9d838b7dda2
+size 341088
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/PartnerProgram.png b/advocacy_docs/partner_docs/RepostorGuide/Images/PartnerProgram.png
new file mode 100644
index 00000000000..93e0514710b
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/PartnerProgram.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1953f3a5526ab37279a598f1c370c5acbf9f6d18f7902cb538161182fbed3b1f
+size 57295
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/QueryofAvailableInstanceBackups.png b/advocacy_docs/partner_docs/RepostorGuide/Images/QueryofAvailableInstanceBackups.png
new file mode 100644
index 00000000000..0e27697f6d8
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/QueryofAvailableInstanceBackups.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3c0dc23023e5f4502126e923f593660991ca2798f652142f26eaf28a3f03b1f
+size 101815
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/RedirectedRestore.png b/advocacy_docs/partner_docs/RepostorGuide/Images/RedirectedRestore.png
new file mode 100644
index 00000000000..33d0329777e
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/RedirectedRestore.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31395f319f188839c82ee733c9a65a959f5a6f6d1e664434ef3e4e05150310ab
+size 388466
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/RepostorSolutionSummary.png b/advocacy_docs/partner_docs/RepostorGuide/Images/RepostorSolutionSummary.png
new file mode 100644
index 00000000000..e51312def67
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/RepostorSolutionSummary.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b08665dd8cfb59511192d243be5463fa8aec8f9f0cfb1fc4dd390513913ebbfb
+size 335368
diff --git a/advocacy_docs/partner_docs/RepostorGuide/Images/RestoreofDatabaseLevelBackup.png b/advocacy_docs/partner_docs/RepostorGuide/Images/RestoreofDatabaseLevelBackup.png
new file mode 100644
index 00000000000..aa30f42769d
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/Images/RestoreofDatabaseLevelBackup.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02a8ca47074995e6dd8b5f2c5f27071af39bc1c929744db93fa3b2da8e54c6be
+size 696767
diff --git a/advocacy_docs/partner_docs/RepostorGuide/index.mdx b/advocacy_docs/partner_docs/RepostorGuide/index.mdx
new file mode 100644
index 00000000000..d4375ed48db
--- /dev/null
+++ b/advocacy_docs/partner_docs/RepostorGuide/index.mdx
@@ -0,0 +1,12 @@
+---
+title: 'Repostor Implementation Guide'
+indexCards: simple
+directoryDefaults:
+ iconName: handshake
+---
+
+
+
+
+EDB GlobalConnect Technology Partner Implementation Guide
+Repostor Data Protector for PostgreSQL
\ No newline at end of file
diff --git a/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx
index 4e62638c45c..f61e2183700 100644
--- a/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx
+++ b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx
@@ -2,7 +2,7 @@
title: "Installation, Configuration and Deployment Demo"
description: "Walk through the process of installing, configuring and deploying the Cloud Native PostgreSQL Operator via a browser-hosted Minikube console"
navTitle: Install, Configure, Deploy
-product: 'Cloud Native PostgreSQL Operator'
+product: 'Cloud Native Operator'
platform: ubuntu
tags:
- postgresql
@@ -65,7 +65,7 @@ You will see one node called `minikube`. If the status isn't yet "Ready", wait f
Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation_upgrade.md) section:
```shell
-kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.7.1.yaml
+kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.8.0.yaml
__OUTPUT__
namespace/postgresql-operator-system created
customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created
@@ -156,6 +156,7 @@ cluster-example-3 1/1 Running 0 19s
Now we can check the status of the cluster:
+
```shell
kubectl get cluster cluster-example -o yaml
__OUTPUT__
@@ -165,13 +166,13 @@ metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"postgresql.k8s.enterprisedb.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"instances":3,"primaryUpdateStrategy":"unsupervised","storage":{"size":"1Gi"}}}
- creationTimestamp: "2021-07-13T06:48:56Z"
+ creationTimestamp: "2021-09-14T00:13:02Z"
generation: 1
name: cluster-example
namespace: default
- resourceVersion: "2270"
+ resourceVersion: "2338"
selfLink: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/clusters/cluster-example
- uid: 405c5b71-6e9b-4baf-a186-2086bbb23a2b
+ uid: 47475a76-c5ee-442c-923b-927cf52f8a89
spec:
affinity:
podAntiAffinityType: preferred
@@ -180,11 +181,11 @@ spec:
initdb:
database: app
owner: app
- imageName: quay.io/enterprisedb/postgresql:13.3
+ imageName: quay.io/enterprisedb/postgresql:13.4
instances: 3
+ logLevel: info
postgresql:
parameters:
- cluster_name: ""
log_destination: csvlog
log_directory: /controller/log
log_filename: postgres
@@ -205,9 +206,9 @@ status:
certificates:
clientCASecret: cluster-example-ca
expirations:
- cluster-example-ca: 2022-07-13 06:43:56 +0000 UTC
- cluster-example-replication: 2022-07-13 06:43:56 +0000 UTC
- cluster-example-server: 2022-07-13 06:43:56 +0000 UTC
+ cluster-example-ca: 2021-12-13 00:08:03 +0000 UTC
+ cluster-example-replication: 2021-12-13 00:08:03 +0000 UTC
+ cluster-example-server: 2021-12-13 00:08:03 +0000 UTC
replicationTLSSecret: cluster-example-replication
serverAltDNSNames:
- cluster-example-rw
@@ -221,11 +222,12 @@ status:
- cluster-example-ro.default.svc
serverCASecret: cluster-example-ca
serverTLSSecret: cluster-example-server
+ configMapResourceVersion: {}
currentPrimary: cluster-example-1
healthyPVC:
- - cluster-example-3
- cluster-example-1
- cluster-example-2
+ - cluster-example-3
instances: 3
instancesStatus:
healthy:
@@ -236,7 +238,7 @@ status:
licenseStatus:
isImplicit: true
isTrial: true
- licenseExpiration: "2021-08-12T06:48:56Z"
+ licenseExpiration: "2021-10-14T00:13:02Z"
licenseStatus: Implicit trial license
repositoryAccess: false
valid: true
@@ -245,12 +247,12 @@ status:
readService: cluster-example-r
readyInstances: 3
secretsResourceVersion:
- applicationSecretVersion: "957"
- clientCaSecretVersion: "953"
- replicationSecretVersion: "955"
- serverCaSecretVersion: "953"
- serverSecretVersion: "954"
- superuserSecretVersion: "956"
+ applicationSecretVersion: "880"
+ clientCaSecretVersion: "876"
+ replicationSecretVersion: "878"
+ serverCaSecretVersion: "876"
+ serverSecretVersion: "877"
+ superuserSecretVersion: "879"
targetPrimary: cluster-example-1
writeService: cluster-example-rw
```
@@ -278,7 +280,7 @@ curl -sSfL \
sudo sh -s -- -b /usr/local/bin
__OUTPUT__
EnterpriseDB/kubectl-cnp info checking GitHub for latest tag
-EnterpriseDB/kubectl-cnp info found version: 1.7.1 for v1.7.1/linux/x86_64
+EnterpriseDB/kubectl-cnp info found version: 1.8.0 for v1.8.0/linux/x86_64
EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp
```
@@ -290,7 +292,7 @@ __OUTPUT__
Cluster in healthy state
Name: cluster-example
Namespace: default
-PostgreSQL Image: quay.io/enterprisedb/postgresql:13.3
+PostgreSQL Image: quay.io/enterprisedb/postgresql:13.4
Primary instance: cluster-example-1
Instances: 3
Ready instances: 3
@@ -325,7 +327,7 @@ __OUTPUT__
Failing over Failing over to cluster-example-2
Name: cluster-example
Namespace: default
-PostgreSQL Image: quay.io/enterprisedb/postgresql:13.3
+PostgreSQL Image: quay.io/enterprisedb/postgresql:13.4
Primary instance: cluster-example-2
Instances: 3
Ready instances: 2
@@ -346,7 +348,7 @@ __OUTPUT__
Cluster in healthy state
Name: cluster-example
Namespace: default
-PostgreSQL Image: quay.io/enterprisedb/postgresql:13.3
+PostgreSQL Image: quay.io/enterprisedb/postgresql:13.4
Primary instance: cluster-example-2
Instances: 3
Ready instances: 3
diff --git a/product_docs/docs/efm/4/efm_user/03_installing_efm.mdx b/product_docs/docs/efm/4/efm_user/03_installing_efm.mdx
index b8e516ca247..7aaa38c92e3 100644
--- a/product_docs/docs/efm/4/efm_user/03_installing_efm.mdx
+++ b/product_docs/docs/efm/4/efm_user/03_installing_efm.mdx
@@ -205,7 +205,7 @@ The following steps will walk you through using the EnterpriseDB apt repository
apt-get -y install edb-efm42
```
-### Ububtu Host
+### Ubuntu Host
1. Assume superuser privileges:
```text
@@ -300,7 +300,7 @@ After installing on each node of the cluster, you must:
1. Modify the [cluster properties file](04_configuring_efm/01_cluster_properties/#cluster_properties) on each node.
2. Modify the [cluster members file](04_configuring_efm/03_cluster_members/#cluster_members) on each node.
3. If applicable, configure and test virtual IP address settings and any scripts that are identified in the cluster properties file.
-4. Start the agent on each node of the cluster. For more information about controlling the service, see [Section 5](08_controlling_efm_service/#controlling-the-failover-manager-service).
+4. Start the agent on each node of the cluster. For more information, see [controlling the failover manager service](08_controlling_efm_service/#controlling-the-failover-manager-service).
### Installation Locations
diff --git a/product_docs/docs/efm/4/efm_user/12_upgrading_existing_cluster.mdx b/product_docs/docs/efm/4/efm_user/12_upgrading_existing_cluster.mdx
index c7c53c8b55e..35c35a25392 100644
--- a/product_docs/docs/efm/4/efm_user/12_upgrading_existing_cluster.mdx
+++ b/product_docs/docs/efm/4/efm_user/12_upgrading_existing_cluster.mdx
@@ -25,7 +25,7 @@ The efm `upgrade-conf` utility locates the `.properties` and `.nodes` files of p
!!! Note
`db.bin` is a required property. When modifying the properties file, ensure that the `db.bin` property specifies the location of the Postgres `bin` directory.
-4. If you are using Eager Failover, you must disable it before stopping the EFM cluster. For more information, see [Disabling Eager Failover](06_configuring_for_eager_failover/disabling_the_eager_failover).
+4. If you are using Eager Failover, you must disable it before stopping the EFM cluster. For more information, see [Disabling Eager Failover](04_configuring_efm/06_configuring_for_eager_failover/#disabling-eager-failover).
5. Use a version-specific command to stop the old Failover Manager cluster; for example, you can use the following command to stop a version 4.1 cluster:
diff --git a/product_docs/docs/efm/4/index.mdx b/product_docs/docs/efm/4/index.mdx
index 4b4676007fb..0c7ed23e73c 100644
--- a/product_docs/docs/efm/4/index.mdx
+++ b/product_docs/docs/efm/4/index.mdx
@@ -1,7 +1,7 @@
---
-title: "EDB Postgres Failover Manager"
+title: "Failover Manager"
directoryDefaults:
- description: "EDB Postgres Failover Manager Version 4.1 Documentation and release notes. PostgreSQL replication and failover manager for achieving high availability."
+ description: "Failover Manager documentation and release notes. PostgreSQL replication and failover manager for achieving high availability."
navigation:
- efm_rel_notes
- "#Getting Started"
diff --git a/product_docs/docs/migration_portal/3.3.0/01_whats_new.mdx b/product_docs/docs/migration_portal/3.3.0/01_whats_new.mdx
index 4d14cf8d244..49d3fa048a8 100644
--- a/product_docs/docs/migration_portal/3.3.0/01_whats_new.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/01_whats_new.mdx
@@ -5,6 +5,9 @@ redirects:
- /migration_portal/3.1.0/01_whats_new/
- /migration_portal/3.2.0/01_whats_new/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/whats_new.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/01_mp_overview_home.mdx b/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/01_mp_overview_home.mdx
index 567fb396a3c..7c02719410c 100644
--- a/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/01_mp_overview_home.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/01_mp_overview_home.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/03_mp_using_portal/01_mp_overview_home/
- /migration_portal/3.2.0/03_mp_using_portal/01_mp_overview_home/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_overview_home.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/02_mp_overview_project.mdx b/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/02_mp_overview_project.mdx
index 9545b08b17c..f2ad96a34b0 100644
--- a/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/02_mp_overview_project.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/02_mp_overview_project.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/03_mp_using_portal/02_mp_overview_project/
- /migration_portal/3.2.0/03_mp_using_portal/02_mp_overview_project/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_overview_project.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/03_mp_overview_wiki.mdx b/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/03_mp_overview_wiki.mdx
index 79f28985ee1..9bb9547896c 100644
--- a/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/03_mp_overview_wiki.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/03_mp_overview_wiki.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/03_mp_using_portal/03_mp_overview_wiki/
- /migration_portal/3.2.0/03_mp_using_portal/03_mp_overview_wiki/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_overview_wiki.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/index.mdx b/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/index.mdx
index 1ba13e3b7ba..61a9aa8c170 100644
--- a/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/index.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/03_mp_using_portal/index.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/03_mp_using_portal/
- /migration_portal/3.2.0/03_mp_using_portal/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_using_portal.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/01_mp_schema_extraction.mdx b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/01_mp_schema_extraction.mdx
index 601ff3e8ef7..b56579c07e1 100644
--- a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/01_mp_schema_extraction.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/01_mp_schema_extraction.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/04_mp_migrating_database/01_mp_schema_extraction/
- /migration_portal/3.2.0/04_mp_migrating_database/01_mp_schema_extraction/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_schema_extraction.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/02_mp_schema_assessment.mdx b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/02_mp_schema_assessment.mdx
index 0a63b3ab73e..e0f913d13ec 100644
--- a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/02_mp_schema_assessment.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/02_mp_schema_assessment.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/04_mp_migrating_database/02_mp_schema_assessment/
- /migration_portal/3.2.0/04_mp_migrating_database/04_mp_data_migration/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_schema_assessment.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/03_mp_schema_migration.mdx b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/03_mp_schema_migration.mdx
index 72f6c3cb51c..abbd3e298d6 100644
--- a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/03_mp_schema_migration.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/03_mp_schema_migration.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/04_mp_migrating_database/03_mp_schema_migration/
- /migration_portal/3.2.0/04_mp_migrating_database/04_mp_data_migration/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_schema_migration.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/04_mp_data_migration.mdx b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/04_mp_data_migration.mdx
index ef50561f03c..6178efe1ed7 100644
--- a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/04_mp_data_migration.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/04_mp_data_migration.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/04_mp_migrating_database/04_mp_data_migration/
- /migration_portal/3.2.0/04_mp_migrating_database/04_mp_data_migration/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_data_migration.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/index.mdx b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/index.mdx
index 9a9dcd48fe0..4c936d9136c 100644
--- a/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/index.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/04_mp_migrating_database/index.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/04_mp_migrating_database/
- /migration_portal/3.2.0/04_mp_migrating_database/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_migrating_database.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/05_mp_advanced_data_migration.mdx b/product_docs/docs/migration_portal/3.3.0/05_mp_advanced_data_migration.mdx
index 708d7c7136a..ce0dbb0ee63 100644
--- a/product_docs/docs/migration_portal/3.3.0/05_mp_advanced_data_migration.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/05_mp_advanced_data_migration.mdx
@@ -6,6 +6,9 @@ redirects:
- /migration_portal/3.1.0/05_mp_advanced_data_migration/
- /migration_portal/3.2.0/05_mp_advanced_data_migration/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/mp_advanced_data_migration.html"
---
diff --git a/product_docs/docs/migration_portal/3.3.0/index.mdx b/product_docs/docs/migration_portal/3.3.0/index.mdx
index 30512a7fa05..270ab62388b 100644
--- a/product_docs/docs/migration_portal/3.3.0/index.mdx
+++ b/product_docs/docs/migration_portal/3.3.0/index.mdx
@@ -10,6 +10,13 @@ redirects:
- /migration_portal/3.1.0/
- /migration_portal/3.2.0/
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/index.html"
+ - "/edb-docs/p/edb-postgres-migration-portal/3.0.1"
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/conclusion.html"
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/whats_new.html"
+ - "/edb-docs/d/edb-postgres-migration-portal/user-guides/user-guide/3.0.1/genindex.html"
---
diff --git a/scripts/legacy_redirects/equivalent_versions.json b/scripts/legacy_redirects/equivalent_versions.json
index b1794cc1680..59a4fffff05 100644
--- a/scripts/legacy_redirects/equivalent_versions.json
+++ b/scripts/legacy_redirects/equivalent_versions.json
@@ -45,7 +45,8 @@
"1.0": "1.15"
},
"EDB Postgres Migration Portal": {
- "3.0.1": "3.2.0"
+ "3.0.1": "3.3.0",
+ "3.2.0": "3.3.0"
},
"EDB ODBC Connector": {
"12.0.0.1": "12.0.0.2",
diff --git a/src/pages/index.js b/src/pages/index.js
index c083b4c3e9b..3ae7efc525d 100644
--- a/src/pages/index.js
+++ b/src/pages/index.js
@@ -234,6 +234,13 @@ const Page = () => (
iconName={iconNames.HANDSHAKE}
headingText="Third Party Integrations"
>
+
+
+ Liquibase Pro
+
+
+ Nutanix AHV
+
Thales CipherTrust Transparent Encryption