From 650a7fbf1e3dcbe471f02c337e922d3044c4dbd4 Mon Sep 17 00:00:00 2001 From: gvasquezvargas Date: Tue, 10 Dec 2024 11:38:54 -0500 Subject: [PATCH 01/20] PGE 13.x: corrected semantic versioning --- product_docs/docs/pge/13/release_notes/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/pge/13/release_notes/index.mdx b/product_docs/docs/pge/13/release_notes/index.mdx index ff4c31359bb..eec41796661 100644 --- a/product_docs/docs/pge/13/release_notes/index.mdx +++ b/product_docs/docs/pge/13/release_notes/index.mdx @@ -4,7 +4,7 @@ navTitle: Release notes description: Release notes for EDB Postgres Extended Server 13. --- -## 2ndQuadrant Postgres 13.18.24 +## 2ndQuadrant Postgres 13.18 Release date: 2024-11-21 From 25835f07c2d4fe84c2aa8cc8d1732b8459fdba4b Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Tue, 10 Dec 2024 06:32:05 +0000 Subject: [PATCH 02/20] Merging import for cnp --- .github/workflows/sync-and-process-files.yml | 65 +++++-- .../processors/cnp/add-frontmatters.mjs | 4 +- .../processors/cnp/update-yaml-links.mjs | 9 +- scripts/source/dispatch_product.py | 12 +- scripts/source/process-cnp-docs.sh | 184 +++++++++++++----- 5 files changed, 202 insertions(+), 72 deletions(-) diff --git a/.github/workflows/sync-and-process-files.yml b/.github/workflows/sync-and-process-files.yml index 690b779dc1e..ecd6d71e723 100644 --- a/.github/workflows/sync-and-process-files.yml +++ b/.github/workflows/sync-and-process-files.yml @@ -2,48 +2,87 @@ name: sync and process files from another repo on: repository_dispatch: types: [sync-to-docs] + workflow_dispatch: + inputs: + repo: + description: Repository to source documentation from + required: true + type: string + ref: + description: Ref name in the source repo + required: true + type: string + sha: + description: SHA in the source repo, should correspond to ref + required: true + type: string + jobs: sync-and-process-files: env: + SOURCE_REPO: ${{ github.event.client_payload.repo || inputs.repo }} + SOURCE_REF: ${{ github.event.client_payload.ref || inputs.ref }} + SOURCE_SHA: ${{ github.event.client_payload.sha || inputs.sha }} + # The body text of the PR requests that will be created - BODY: "Automated changes to pull in and process updates from repo: ${{ github.event.client_payload.repo }} ref: ${{ github.event.client_payload.ref }}" + BODY: | + Automated changes to pull in and process updates from repo: ${{ github.event.client_payload.repo || inputs.repo }} ref: ${{ github.event.client_payload.ref || inputs.ref }} - # The name of the branch that will be created - BRANCH_NAME: automatic_docs_update/repo_${{ github.event.client_payload.repo }}/ref_${{ github.event.client_payload.ref }} + ## Reviewing + - Look for formatting that may not work as intended + - Watch out for local changes (factual corrections, copy edits, link fixes) that may be overwritten + - You may need to resolve conflicts before merging - check the upstream repo for context when this isn't obvious - # The users that should be assigned to the PR as a comma separated list of github usernames. 
- REVIEWERS: + # The name of the branch that will be created + BRANCH_NAME: automatic_docs_update/repo_${{ github.event.client_payload.repo || inputs.repo }}/ref_${{ github.event.client_payload.ref || inputs.ref }} # The title of the PR request that will be created - TITLE: "Process changes to docs from: repo: ${{ github.event.client_payload.repo }} ref: ${{ github.event.client_payload.ref }}" + TITLE: "Process changes to docs from: repo: ${{ github.event.client_payload.repo || inputs.repo }} ref: ${{ github.event.client_payload.ref || inputs.ref }}" - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: + - name: Check inputs + if: ${{ !env.SOURCE_REPO || !env.SOURCE_REF || !env.SOURCE_SHA }} + run: | + echo "::error title=missing inputs::must provide source repo, source ref and source SHA" + exit 1 + - name: Checkout destination uses: actions/checkout@v4 with: path: destination + lfs: true - name: Checkout source repo uses: actions/checkout@v4 with: - ref: ${{ github.event.client_payload.sha }} - repository: ${{ github.event.client_payload.repo }} + ref: ${{ env.SOURCE_SHA }} + repository: ${{ env.SOURCE_REPO }} token: ${{ secrets.SYNC_FILES_TOKEN }} path: source - name: setup node uses: actions/setup-node@v4 with: - node-version: "14" + node-version: "18" - name: update npm - run: npm install -g npm@7 + run: npm install -g npm@10 - name: Process changes - run: ${{ github.workspace }}/destination/scripts/source/dispatch_product.py ${{ github.event.client_payload.repo }} ${{ github.workspace }} + id: changes + run: ${{ github.workspace }}/destination/scripts/source/dispatch_product.py ${{env.SOURCE_REPO }} ${{ github.workspace }} working-directory: source + - name: Update PR body + if: ${{ steps.changes.outputs.new-tag }} + run: | + echo 'BODY<> $GITHUB_ENV + echo "$BODY" >> $GITHUB_ENV + echo '## After merging' >> $GITHUB_ENV + echo 'Create a tag named `${{ steps.changes.outputs.new-tag }}` that points to the merge commit' >> $GITHUB_ENV + echo 'EOF' >> $GITHUB_ENV + - name: Create pull request if: ${{ !env.ACT }} uses: peter-evans/create-pull-request@v6 @@ -51,6 +90,6 @@ jobs: body: ${{ env.BODY }} branch: ${{ env.BRANCH_NAME }} path: destination/ - reviewers: ${{ env.REVIEWERS }} title: ${{ env.TITLE }} token: ${{ secrets.GITHUB_TOKEN }} + diff --git a/scripts/fileProcessor/processors/cnp/add-frontmatters.mjs b/scripts/fileProcessor/processors/cnp/add-frontmatters.mjs index 42471ea7ea3..28d80f81c35 100644 --- a/scripts/fileProcessor/processors/cnp/add-frontmatters.mjs +++ b/scripts/fileProcessor/processors/cnp/add-frontmatters.mjs @@ -51,7 +51,9 @@ navigation: const mkdocsYaml = yaml.load( await fs.readFile("mkdocs.yml", { encoding: "utf8" }), ); - mkdocsYaml.nav.forEach((line) => { + // handle nested / labeled entries (by flattening) + const nav = mkdocsYaml.nav.flatMap(l => l.slice ? l : Object.values(l).flatMap(nl => nl)) + nav.forEach((line) => { // make sure file extensions are stripped off. 
modifiedFrontmatter = `${modifiedFrontmatter} - ${line.slice(0, -3)}\n`; diff --git a/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs b/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs index 582513f4d08..8a81e80b818 100644 --- a/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs +++ b/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs @@ -6,7 +6,7 @@ import remarkFrontmatter from "remark-frontmatter"; import remarkStringify from "remark-stringify"; import admonitions from "remark-admonitions"; import visit from "unist-util-visit"; -import isAbsoluteUrl from "is-absolute-url"; +import path from "path"; export const process = async (filename, content) => { const processor = unified() @@ -41,8 +41,11 @@ function linkRewriter() { // link rewriter: // - make relative to parent (because gatsby URL paths are always directories) visit(tree, "link", (node) => { - if (isAbsoluteUrl(node.url) || node.url[0] === "/") return; - if (!node.url.includes(".yaml")) return; + let url = new URL(node.url, "fake://do/x/"); + // don't mess with absolute URLs or absolute paths + if (!url.href.startsWith("fake://do/x/")) return; + // only rewrite yaml file links (for historical reasons, these are resolved differently from ordinary paths) + if (!['.yaml', '.yml'].includes(path.extname(url.pathname))) return; node.url = node.url.replace(/^\/?/, "../"); }); }; diff --git a/scripts/source/dispatch_product.py b/scripts/source/dispatch_product.py index 3874bdb67ff..37060f5f30f 100755 --- a/scripts/source/dispatch_product.py +++ b/scripts/source/dispatch_product.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 import argparse -import os +import subprocess import sys parser = argparse.ArgumentParser() @@ -10,8 +10,8 @@ args = parser.parse_args() commands = { - "EnterpriseDB/cloud-native-postgres": f"{args.workspace}/destination/scripts/source/process-cnp-docs.sh {args.workspace}/source {args.workspace }/destination", - "EnterpriseDB/pg4k-pgd": f"{args.workspace}/destination/scripts/source/process-pgd4k-docs.sh {args.workspace}/source {args.workspace }/destination", + "EnterpriseDB/cloud-native-postgres": f"{args.workspace}/destination/scripts/source/process-cnp-docs.sh {args.workspace}/source {args.workspace}/destination", + "EnterpriseDB/pg4k-pgd": f"{args.workspace}/destination/scripts/source/process-pgd4k-docs.sh {args.workspace}/source {args.workspace}/destination", "EnterpriseDB/fe": f"mkdir -p {args.workspace}/destination/icons-pkg && \ cp -fr utils/icons-placeholder/output/* {args.workspace}/destination/icons-pkg/", "EnterpriseDB/LiveCompare": f"node {args.workspace}/destination/scripts/source/livecompare.js {args.workspace}/source {args.workspace}/destination --unhandled-rejections=strict", @@ -21,9 +21,9 @@ "EnterpriseDB/tpa": f"{args.workspace}/destination/scripts/source/process-tpa-docs.sh {args.workspace}/source {args.workspace}/destination", } -ret = os.system( +ret = subprocess.call( f"cd {args.workspace}/destination/scripts/source && \ - npm ci" + npm ci", shell=True ) if ret != 0: @@ -31,7 +31,7 @@ if args.repo in commands: cmd = commands[args.repo] - ret = os.system(cmd) + ret = subprocess.call(cmd, shell=True) else: print( f"The workflow has not been configured for the {args.repo} repo", diff --git a/scripts/source/process-cnp-docs.sh b/scripts/source/process-cnp-docs.sh index a8729484066..27620cb8507 100755 --- a/scripts/source/process-cnp-docs.sh +++ b/scripts/source/process-cnp-docs.sh @@ -1,5 +1,12 @@ #!/bin/bash +# Process: +# - establish previous tag +# - establish new 
tag +# - create new branch with new content based on previous tag +# (allow conflicts to be resolved when merging with latest mainline branch) +# - identify new tag + if [ -z $1 ] || [ -z $2 ] then echo "the path to the source and destination checkouts must be provided" @@ -12,67 +19,146 @@ CWD=`pwd` SOURCE_CHECKOUT=`cd $1 && pwd` DESTINATION_CHECKOUT=`cd $2 && pwd` +function do_import { + local source=$1 + local dest=$2 + local version=$3 + local cwd=`pwd` + + cd $dest/product_docs/docs/postgres_for_kubernetes/1/ + node "$DESTINATION_CHECKOUT/scripts/source/files-to-ignore.mjs" \ + "$dest/product_docs/docs/postgres_for_kubernetes/1/" \ + > $source/files-to-ignore.txt + + cd $source/docs + + # grab key bit of source for use in docs + cp $source/config/manager/default-monitoring.yaml $source/docs/src/ + + node "$DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs" \ + -f "src/**/*.md" \ + -p "cnp/add-frontmatters" \ + -p "cnp/flatten-appendices" \ + -p "cnp/replace-github-urls" \ + -p "cnp/update-links" \ + -p "cnp/update-yaml-links" \ + -p "cnp/rewrite-mdextra-anchors" \ + -p "cnp/cleanup-html" \ + -p "cnp/rename-to-mdx" \ + > /dev/null + + node "$DESTINATION_CHECKOUT/scripts/source/merge-indexes.mjs" \ + "$source/docs/src/index.mdx" \ + "$dest/product_docs/docs/postgres_for_kubernetes/1/index.mdx" \ + "$source/docs/src/index.mdx" \ + >> $source/files-to-ignore.txt + + rsync -av --delete --exclude-from=$source/files-to-ignore.txt src/ $dest/product_docs/docs/postgres_for_kubernetes/1/ > /dev/null + + # Archive API docs + local api_ref_dir="$dest/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1" + local current_api_ref="$api_ref_dir/$version.mdx" + mkdir -p "$api_ref_dir" + mv "$dest/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx" "$current_api_ref" + # TODO: just install yq + node "$DESTINATION_CHECKOUT/scripts/source/update-yaml.mjs" "$current_api_ref" \ + title="API Reference - $version" \ + navTitle="$version" \ + pdfExclude=true + if [ $version == $LATEST_TAG ] + then + local api_ref_index="$api_ref_dir/index.mdx" + cp "$current_api_ref" "$api_ref_index" + node "$DESTINATION_CHECKOUT/scripts/source/update-yaml.mjs" "$api_ref_index" \ + navTitle="API Reference" \ + pdfExclude=null \ + navigation=[`ls "$api_ref_dir" | grep ^v | sed -e 's/\.mdx$//' | sort -V -r | paste -sd "," - `] + fi + node "$DESTINATION_CHECKOUT/scripts/source/update-yaml.mjs" "$current_api_ref" \ + originalFilePath=null + + cd $cwd +} + cd $DESTINATION_CHECKOUT/scripts/fileProcessor npm ci +# grab some information about what we're importing cd $SOURCE_CHECKOUT -# grab some information about what we're importing -CURRENT_TAG=`git describe --exact-match --tags` +git fetch --tags + LATEST_TAG=`git tag | sort -V -r | head -n 1` +CURRENT_TAG=`git describe --exact-match --tags || echo "$LATEST_TAG-next"` +CURRENT_TAG_INDEX=`git tag | sort -V -r | grep -n $CURRENT_TAG | cut -d : -f 1 || echo 0` +PREVIOUS_TAG=`git tag | sort -V -r | head -n $(($CURRENT_TAG_INDEX+1)) | tail -n 1` -# create a temporary worktree to avoid messing up source repo (for local work; CI doesn't care) -git worktree add --detach ./docs-import +cd $DESTINATION_CHECKOUT -cd $DESTINATION_CHECKOUT/product_docs/docs/postgres_for_kubernetes/1/ -node $DESTINATION_CHECKOUT/scripts/source/files-to-ignore.mjs \ - "$DESTINATION_CHECKOUT/product_docs/docs/postgres_for_kubernetes/1/" \ - > $SOURCE_CHECKOUT/docs-import/files-to-ignore.txt - -cd $SOURCE_CHECKOUT/docs-import/docs - -# grab key bit of source for use in docs -cp 
$SOURCE_CHECKOUT/docs-import/config/manager/default-monitoring.yaml $SOURCE_CHECKOUT/docs-import/docs/src/ - -node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \ - -f "src/**/*.md" \ - -p "cnp/add-frontmatters" \ - -p "cnp/flatten-appendices" \ - -p "cnp/replace-github-urls" \ - -p "cnp/update-links" \ - -p "cnp/update-yaml-links" \ - -p "cnp/rewrite-mdextra-anchors" \ - -p "cnp/cleanup-html" \ - -p "cnp/rename-to-mdx" - -node $DESTINATION_CHECKOUT/scripts/source/merge-indexes.mjs \ - "$SOURCE_CHECKOUT/docs-import/docs/src/index.mdx" \ - "$DESTINATION_CHECKOUT/product_docs/docs/postgres_for_kubernetes/1/index.mdx" \ - "$SOURCE_CHECKOUT/docs-import/docs/src/index.mdx" \ - >> $SOURCE_CHECKOUT/docs-import/files-to-ignore.txt - -rsync -av --delete --exclude-from=$SOURCE_CHECKOUT/docs-import/files-to-ignore.txt src/ $DESTINATION_CHECKOUT/product_docs/docs/postgres_for_kubernetes/1/ - -# Archive API docs -API_REF_DIR="$DESTINATION_CHECKOUT/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1" -CURRENT_API_REF="$API_REF_DIR/$CURRENT_TAG.mdx" -mv "$DESTINATION_CHECKOUT/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx" "$CURRENT_API_REF" -# TODO: just install yq -node "$DESTINATION_CHECKOUT/scripts/source/update-yaml.mjs" "$CURRENT_API_REF" \ - title="API Reference - $CURRENT_TAG" \ - navTitle="$CURRENT_TAG" -if [ $CURRENT_TAG == $LATEST_TAG ] +PREVIOUS_COMMIT=`git rev-list -n 1 product/pg4k/$PREVIOUS_TAG || git rev-list -n 1 HEAD` +PREVIOUS_COMMIT_DESC=`git rev-list --format=oneline -n 1 $PREVIOUS_COMMIT` + +if [[ -z "$CURRENT_TAG" || -z "$PREVIOUS_TAG" || -z "$PREVIOUS_COMMIT" || -z "$LATEST_TAG" ]] then - API_REF_INDEX="$API_REF_DIR/index.mdx" - cp "$CURRENT_API_REF" "$API_REF_INDEX" - node "$DESTINATION_CHECKOUT/scripts/source/update-yaml.mjs" "$API_REF_INDEX" \ - navTitle="API Reference" \ - navigation=[`ls "$API_REF_DIR" | grep ^v | sed -e 's/\.mdx$//' | sort -V -r | paste -sd "," - `] + echo "Missing context: CURRENT_TAG=$CURRENT_TAG PREVIOUS_TAG=$PREVIOUS_TAG PREVIOUS_COMMIT=$PREVIOUS_COMMIT LATEST_TAG=$LATEST_TAG" + exit 1 fi -node "$DESTINATION_CHECKOUT/scripts/source/update-yaml.mjs" "$CURRENT_API_REF" \ - originalFilePath=null + +echo "Applying diff between $PREVIOUS_TAG and $CURRENT_TAG, including local changes since $PREVIOUS_COMMIT_DESC" +set -e + +git config user.name "github-actions[bot]" +git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + +# create a temporary worktree to avoid messing up source repo (for local work; CI doesn't care) +cd $SOURCE_CHECKOUT +git worktree add --detach ./docs-import + +cd $DESTINATION_CHECKOUT + +git worktree add --detach ./docs-import + +cd $DESTINATION_CHECKOUT/docs-import +git checkout $PREVIOUS_COMMIT +cd $SOURCE_CHECKOUT/docs-import +git checkout $PREVIOUS_TAG + +do_import $SOURCE_CHECKOUT/docs-import $DESTINATION_CHECKOUT/docs-import $PREVIOUS_TAG + +cd $DESTINATION_CHECKOUT/docs-import +git checkout -b temp/docs-import-$PREVIOUS_TAG +git add product_docs/docs/postgres_for_kubernetes +git commit -m "PG4K import for $PREVIOUS_TAG" + +cd $SOURCE_CHECKOUT +git worktree remove --force ./docs-import +git worktree add --detach ./docs-import + +do_import $SOURCE_CHECKOUT/docs-import $DESTINATION_CHECKOUT/docs-import $CURRENT_TAG + +cd $DESTINATION_CHECKOUT/docs-import + +git checkout -b temp/docs-import-$CURRENT_TAG +git add product_docs/docs/postgres_for_kubernetes +git commit -m "PG4K import for $CURRENT_TAG" + +cd $DESTINATION_CHECKOUT + +git checkout $PREVIOUS_COMMIT + +git cherry-pick --no-commit 
--strategy=ort -X theirs temp/docs-import-$CURRENT_TAG # cleanup: remove worktree cd $SOURCE_CHECKOUT git worktree remove --force ./docs-import + +cd $DESTINATION_CHECKOUT + +git worktree remove --force ./docs-import + +git branch -D temp/docs-import-$CURRENT_TAG +git branch -D temp/docs-import-$PREVIOUS_TAG + cd $CWD + +echo "new-tag=product/pg4k/$CURRENT_TAG" >> $GITHUB_OUTPUT From 322897a2750ef97aa156c9b5911a8c65ce714437 Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Tue, 10 Dec 2024 23:41:19 +0000 Subject: [PATCH 03/20] Fix for error & warnings when doing merge sourcing --- .github/workflows/sync-and-process-files.yml | 1 + scripts/fileProcessor/package-lock.json | 32 ++++++++++---------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/.github/workflows/sync-and-process-files.yml b/.github/workflows/sync-and-process-files.yml index ecd6d71e723..26526cc414b 100644 --- a/.github/workflows/sync-and-process-files.yml +++ b/.github/workflows/sync-and-process-files.yml @@ -89,6 +89,7 @@ jobs: with: body: ${{ env.BODY }} branch: ${{ env.BRANCH_NAME }} + base: develop path: destination/ title: ${{ env.TITLE }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/scripts/fileProcessor/package-lock.json b/scripts/fileProcessor/package-lock.json index b4f5ebb9b71..34bd6a93a7f 100644 --- a/scripts/fileProcessor/package-lock.json +++ b/scripts/fileProcessor/package-lock.json @@ -1082,12 +1082,12 @@ } }, "node_modules/micromatch": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz", - "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dependencies": { - "braces": "^3.0.1", - "picomatch": "^2.2.3" + "braces": "^3.0.3", + "picomatch": "^2.3.1" }, "engines": { "node": ">=8.6" @@ -1134,9 +1134,9 @@ } }, "node_modules/picomatch": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", - "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "engines": { "node": ">=8.6" }, @@ -2374,12 +2374,12 @@ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==" }, "micromatch": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz", - "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "requires": { - "braces": "^3.0.1", - "picomatch": "^2.2.3" + "braces": "^3.0.3", + "picomatch": "^2.3.1" } }, "ms": { @@ -2416,9 +2416,9 @@ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==" }, "picomatch": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", - "integrity": 
"sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==" + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==" }, "prettier": { "version": "2.8.8", From 85f4c217bc264138d43e921bea63d66422a89315 Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Wed, 11 Dec 2024 02:36:10 +0000 Subject: [PATCH 04/20] cnp merge sync: grab tags for destination too --- .github/workflows/sync-and-process-files.yml | 4 ++++ scripts/source/process-cnp-docs.sh | 2 ++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/sync-and-process-files.yml b/.github/workflows/sync-and-process-files.yml index 26526cc414b..8d4e8ac12ac 100644 --- a/.github/workflows/sync-and-process-files.yml +++ b/.github/workflows/sync-and-process-files.yml @@ -19,6 +19,9 @@ on: jobs: sync-and-process-files: + permissions: + contents: write + pull-requests: write env: SOURCE_REPO: ${{ github.event.client_payload.repo || inputs.repo }} SOURCE_REF: ${{ github.event.client_payload.ref || inputs.ref }} @@ -93,4 +96,5 @@ jobs: path: destination/ title: ${{ env.TITLE }} token: ${{ secrets.GITHUB_TOKEN }} + commit-message: "Sync ${{ env.SOURCE_REPO }} ${{ steps.changes.outputs.new-tag }}" diff --git a/scripts/source/process-cnp-docs.sh b/scripts/source/process-cnp-docs.sh index 27620cb8507..b2a6119e9f0 100755 --- a/scripts/source/process-cnp-docs.sh +++ b/scripts/source/process-cnp-docs.sh @@ -95,6 +95,8 @@ PREVIOUS_TAG=`git tag | sort -V -r | head -n $(($CURRENT_TAG_INDEX+1)) | tail -n cd $DESTINATION_CHECKOUT +git fetch --tags + PREVIOUS_COMMIT=`git rev-list -n 1 product/pg4k/$PREVIOUS_TAG || git rev-list -n 1 HEAD` PREVIOUS_COMMIT_DESC=`git rev-list --format=oneline -n 1 $PREVIOUS_COMMIT` From f2b5d81fc8c501624e3ab33ef6c90d3f5e4b217a Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Wed, 11 Dec 2024 05:18:55 +0000 Subject: [PATCH 05/20] Generate release notes when source files change --- .github/workflows/generate-release-notes.yml | 50 ++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 .github/workflows/generate-release-notes.yml diff --git a/.github/workflows/generate-release-notes.yml b/.github/workflows/generate-release-notes.yml new file mode 100644 index 00000000000..369403d0c9f --- /dev/null +++ b/.github/workflows/generate-release-notes.yml @@ -0,0 +1,50 @@ +name: generate release notes +on: + pull_request: + types: [opened, synchronize] + paths: + - "**/src/*.yml" + - "**/src/*.yaml" +jobs: + release-notes: + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.ref }} + path: content + sparse-checkout: | + advocacy_docs + product_docs + + - name: Checkout relgen tool + uses: actions/checkout@v4 + with: + ref: develop + path: tools + sparse-checkout: | + tools + + - name: setup node + uses: actions/setup-node@v4 + + - name: install dependencies + run: npm --prefix ./tools/tools/automation/generators/relgen ci + + # TODO: limit this to paths that have actually *changed* + - name: regenerate relnotes + run: | + shopt -s globstar + for rnmetapath in ./content/**/src/meta.yml; do + ./tools/tools/automation/generators/relgen/relgen.js -p ${rnmetapath%/src/meta.yml} + done + + - name: commit modified files + run: | + cd ./content + git config user.name "github-actions[bot]" + git config user.email 
"41898282+github-actions[bot]@users.noreply.github.com" + git add . + git commit -m "update generated release notes" + git push \ No newline at end of file From a3df7b8125d5bd57f5c17611194a0108e9dcc2cc Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Wed, 11 Dec 2024 05:38:02 +0000 Subject: [PATCH 06/20] regen release notes: don't fail when nothing changed --- .github/workflows/generate-release-notes.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/generate-release-notes.yml b/.github/workflows/generate-release-notes.yml index 369403d0c9f..613e284dc05 100644 --- a/.github/workflows/generate-release-notes.yml +++ b/.github/workflows/generate-release-notes.yml @@ -5,7 +5,7 @@ on: paths: - "**/src/*.yml" - "**/src/*.yaml" -jobs: +jobs: release-notes: runs-on: ubuntu-latest steps: @@ -39,8 +39,15 @@ jobs: for rnmetapath in ./content/**/src/meta.yml; do ./tools/tools/automation/generators/relgen/relgen.js -p ${rnmetapath%/src/meta.yml} done + + - name: check for modified files + id: changes + run: | + cd ./content + echo "files=`git ls-files --other --modified --exclude-standard | wc -l`" >> $GITHUB_OUTPUT - name: commit modified files + if: steps.changes.outputs.files > 0 run: | cd ./content git config user.name "github-actions[bot]" From dccffd7f8fb7162ac24da85c3b2b44c8e81ac377 Mon Sep 17 00:00:00 2001 From: Harisai Marisa <52988175+iamharisai@users.noreply.github.com> Date: Wed, 11 Dec 2024 22:50:13 +0530 Subject: [PATCH 07/20] Update terminology.mdx The current documentation describes a "cluster" as a group of "multiple redundant systems," which is not universally accurate in the broader computing context. Clusters are generally defined as a group of interconnected systems working together to function as a single cohesive unit. While redundancy may be a feature of some clusters, it is not inherent to the definition. This change revises the wording to reflect a more accurate and generic definition of a cluster in computing. --- product_docs/docs/pgd/5.6/terminology.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/pgd/5.6/terminology.mdx b/product_docs/docs/pgd/5.6/terminology.mdx index 2e1a76092f7..12953ae6ca4 100644 --- a/product_docs/docs/pgd/5.6/terminology.mdx +++ b/product_docs/docs/pgd/5.6/terminology.mdx @@ -27,7 +27,7 @@ How [Raft](#replicated-available-fault-tolerance-raft) makes group-wide decision #### Cluster -Generically, a cluster is a group of multiple redundant systems arranged to appear to end users as one system. See also [PGD cluster](#pgd-cluster) and [Postgres cluster](#postgres-cluster). +Generically, a cluster is a group of multiple systems arranged to appear to end users as one system. See also [PGD cluster](#pgd-cluster) and [Postgres cluster](#postgres-cluster). 
#### DDL (data definition language) From 7075cc69a240be1ad1c056bb6f8f621b79d6c771 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Wed, 27 Nov 2024 10:26:07 +0000 Subject: [PATCH 08/20] Test fix for pages and feedback Signed-off-by: Dj Walker-Morgan --- .../docs/postgres_distributed_for_kubernetes/1/index.mdx | 3 ++- product_docs/docs/postgres_for_kubernetes/1/index.mdx | 4 +++- src/pages/index.js | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx index ce73a581e66..ec6dd166317 100644 --- a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx +++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx @@ -1,5 +1,5 @@ --- -title: EDB Postgres Distributed for Kubernetes +title: EDB CloudNativePG Global Cluster originalFilePath: src/index.md indexCards: none navigation: @@ -35,6 +35,7 @@ navigation: - known_issues directoryDefaults: iconName: logos/KubernetesMono + displayBanner: "We are in the process of migrating the documentation from the previous EDB Postgres Distributed for Kubernetes name to the new EDB CloudNativePG Global Cluster name. You may see references to both names in this documentation." --- EDB Postgres Distributed for Kubernetes (`PGD4K`) is an diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx index 17ef3d12d5a..50e7c62ce2d 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx @@ -1,7 +1,9 @@ --- -title: EDB Postgres for Kubernetes +title: EDB CloudNativePG Cluster originalFilePath: src/index.md indexCards: none +directoryDefaults: + displayBanner: "We are in the process of migrating the documentation from the previous EDB Postgres for Kubernetes name to the new EDB CloudNativePG Cluster name. You may see references to both names in this documentation." 
navigation: - rel_notes - '!commercial_support.mdx' diff --git a/src/pages/index.js b/src/pages/index.js index b7ec2e9b583..7358cd2d57d 100644 --- a/src/pages/index.js +++ b/src/pages/index.js @@ -372,11 +372,11 @@ const Page = () => { /> - EDB Postgres Distributed for Kubernetes + EDB CloudNativePG Global Cluster - EDB Postgres for Kubernetes + EDB CloudNativePG Cluster From 2305e6c9b51bc88418711604796e35a1f8acb611 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Wed, 27 Nov 2024 10:49:07 +0000 Subject: [PATCH 09/20] Adjust updates Signed-off-by: Dj Walker-Morgan --- src/constants/updates.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/constants/updates.js b/src/constants/updates.js index c9b3fc9caac..d09d13fbf51 100644 --- a/src/constants/updates.js +++ b/src/constants/updates.js @@ -2,10 +2,10 @@ import IconNames from "../components/icon/iconNames"; export const updates = [ { - title: "Postgres Distributed for Kubernetes 1.0.1", + title: "EDB CloudNativePG Global Cluster 1.0.1", icon: IconNames.KUBERNETES, description: - "PGD4K brings LDAP authentication configuration, tablespace configuration and more in this patch release.", + "EDB CloudNativePG Global Cluster brings LDAP authentication configuration, tablespace configuration and more in this patch release.", url: "/postgres_distributed_for_kubernetes/latest/", moreUrl: "/postgres_distributed_for_kubernetes/latest/rel_notes/1_0_1_rel_notes/", From b059fb6715d636875e5048fbf997149bb51d2d19 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 12 Dec 2024 09:55:05 +0000 Subject: [PATCH 10/20] Added the updated husky hooks Signed-off-by: Dj Walker-Morgan --- .husky/post-checkout | 2 +- .husky/post-commit | 2 +- .husky/post-merge | 2 +- .husky/pre-push | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.husky/post-checkout b/.husky/post-checkout index ca7fcb40088..5abf8ed93f7 100755 --- a/.husky/post-checkout +++ b/.husky/post-checkout @@ -1,3 +1,3 @@ #!/bin/sh -command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; } +command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; } git lfs post-checkout "$@" diff --git a/.husky/post-commit b/.husky/post-commit index 52b339cb3f4..b8b76c2c425 100755 --- a/.husky/post-commit +++ b/.husky/post-commit @@ -1,3 +1,3 @@ #!/bin/sh -command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; } +command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. 
If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; } git lfs post-commit "$@" diff --git a/.husky/post-merge b/.husky/post-merge index a912e667aa3..726f909891a 100755 --- a/.husky/post-merge +++ b/.husky/post-merge @@ -1,3 +1,3 @@ #!/bin/sh -command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; } +command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; } git lfs post-merge "$@" diff --git a/.husky/pre-push b/.husky/pre-push index 0f0089bc25d..5f26dc45523 100755 --- a/.husky/pre-push +++ b/.husky/pre-push @@ -1,3 +1,3 @@ #!/bin/sh -command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; } +command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; } git lfs pre-push "$@" From 5750fc9b1f059c4c258358e7394d44b87883ed1d Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 12 Dec 2024 09:21:52 +0000 Subject: [PATCH 11/20] Added small formerly Signed-off-by: Dj Walker-Morgan --- src/pages/index.js | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/pages/index.js b/src/pages/index.js index 7358cd2d57d..6daa9f16143 100644 --- a/src/pages/index.js +++ b/src/pages/index.js @@ -373,10 +373,18 @@ const Page = () => { EDB CloudNativePG Global Cluster +
+            <br />
+            <small>Formerly EDB Postgres Distributed for Kubernetes</small>
             EDB CloudNativePG Cluster
+            <br />
+            <small>Formerly EDB Postgres for Kubernetes</small>
From f94bda53358b4a2504a51261641407e2eb2f8599 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 12 Dec 2024 10:24:33 +0000 Subject: [PATCH 12/20] Revert "Added the updated husky hooks" This reverts commit b059fb6715d636875e5048fbf997149bb51d2d19. --- .husky/post-checkout | 2 +- .husky/post-commit | 2 +- .husky/post-merge | 2 +- .husky/pre-push | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.husky/post-checkout b/.husky/post-checkout index 5abf8ed93f7..ca7fcb40088 100755 --- a/.husky/post-checkout +++ b/.husky/post-checkout @@ -1,3 +1,3 @@ #!/bin/sh -command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; } +command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; } git lfs post-checkout "$@" diff --git a/.husky/post-commit b/.husky/post-commit index b8b76c2c425..52b339cb3f4 100755 --- a/.husky/post-commit +++ b/.husky/post-commit @@ -1,3 +1,3 @@ #!/bin/sh -command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; } +command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; } git lfs post-commit "$@" diff --git a/.husky/post-merge b/.husky/post-merge index 726f909891a..a912e667aa3 100755 --- a/.husky/post-merge +++ b/.husky/post-merge @@ -1,3 +1,3 @@ #!/bin/sh -command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; } +command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; } git lfs post-merge "$@" diff --git a/.husky/pre-push b/.husky/pre-push index 5f26dc45523..0f0089bc25d 100755 --- a/.husky/pre-push +++ b/.husky/pre-push @@ -1,3 +1,3 @@ #!/bin/sh -command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; } +command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. 
If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; } git lfs pre-push "$@" From 24e187d37c44903123df70c44097459fc37c81d0 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 12 Dec 2024 16:11:48 +0000 Subject: [PATCH 13/20] Q42024 Updates Signed-off-by: Dj Walker-Morgan --- .../latest-release-news/2024q4release.mdx | 104 ++++++++++++++++++ .../overview/latest-release-news/index.mdx | 102 +++++++++-------- src/pages/index.js | 2 +- 3 files changed, 164 insertions(+), 44 deletions(-) create mode 100644 advocacy_docs/edb-postgres-ai/overview/latest-release-news/2024q4release.mdx diff --git a/advocacy_docs/edb-postgres-ai/overview/latest-release-news/2024q4release.mdx b/advocacy_docs/edb-postgres-ai/overview/latest-release-news/2024q4release.mdx new file mode 100644 index 00000000000..c19b3bf0bf0 --- /dev/null +++ b/advocacy_docs/edb-postgres-ai/overview/latest-release-news/2024q4release.mdx @@ -0,0 +1,104 @@ +--- +title: "EDB Postgres AI Q4 2024 release highlights" +navTitle: Q4 2024 release highlights +description: The latest features released and updated in EDB Postgres AI. +date: 2024-12-10 +--- + +Date: **December 10, 2024** + +This [release roundup](https://www.enterprisedb.com/blog/solving-enterprise-generative-ai-and-analytics-challenges-zooming-our-q4-2024-release) originally appeared on the EDB blog. + +## Introducing the EDB Postgres AI Software Deployment: cloud agility, on your terms + +### **Enable cloud agility and AI sovereignty for critical data infrastructure –  anywhere, any environment.** + +Earlier today, we [announced](https://www.enterprisedb.com/news/edb-brings-cloud-agility-and-observability-hybrid-environments-sovereign-control) major updates to the [EDB Postgres AI](https://www.enterprisedb.com/products/edb-postgres-ai) sovereign data and AI platform. In the wake of data and AI becoming increasingly important to business innovation, our customers have asked us for more flexible solutions that offer both agility and control.  + +In response, we’ve launched a number of new generally available and preview capabilities to help accelerate deployment of EDB Postgres AI in [sovereign](https://www.enterprisedb.com/use-case/sovereign-ai), hybrid environments as an [omni-data platform](https://www.enterprisedb.com/use-case/omni-data-platform) that works across your enterprise’s data corpus to drive faster time to market for data-driven applications. With the new [EDB Postgres AI Software Deployment](https://www.enterprisedb.com/products/software-deployment), you can deploy, manage, scale, and observe mission critical data infrastructure in any self-managed, hybrid, or public cloud environment. + +The single container-driven software installation enables the consolidation of structured and unstructured data in a single multi-model data platform to accelerate transactional, analytical, and AI workloads. The Software Deployment unlocks a number of new capabilities: + +1. **Hybrid Control Plane**, enabling a hybrid database-as-a-service (DBaaS) with Kubernetes-driven automation and advanced observability across 200+ metrics to enable a cloud-like experience – even in your private data center. +2. **Analytics Accelerator**, which unlocks rapid analytics across unified business data in Postgres, powering 30x faster query performance and improving cost efficiency. +3. 
**AI Accelerator**, the fastest way to test and launch enterprise generative AI (GenAI) applications like chatbots and recommendation engines, so you can build cutting-edge GenAI functionality with just 5 lines of familiar SQL code (rather than 130+ using standard approaches). + +To continue supporting our customers’ requirements as they evolve with their growing transactional workloads, we’ve also released enhancements to our **transactional database server software** and tooling, including **EDB Postgres 17** to meet the demands of modern workloads and the **EDB Software Bill of Materials**, offering visibility into your secure open source supply chain.  + +Today, these **transactional database enhancements** are **generally available**, along with the **AI Accelerator**. The **Hybrid Control Plane** and **Analytics Accelerator** are **now available for preview** through a [concierge demo experience](https://www.enterprisedb.com/engage). + +## What’s in Preview? Unlock Cloud Scale And Rapid Analytics In Hybrid Environments  + +### **Hybrid Control Plane** + +_Automation, single pane of glass management, and observability across hybrid data estates._ + +Modern enterprises manage data across multiple clouds and on-premises deployments. The undifferentiated heavy lifting of database administration often distracts operators and engineers from more value-oriented work, like improving app scalability and accelerating time to market for data initiatives. While public cloud Database-as-a-Service (DBaaS) offerings provide automation of administrative tasks, they require tradeoffs on control, data sovereignty and deployment flexibility.  + +The **Hybrid Control Plane** is a **centralized management and automation solution** for the EDB Postgres AI Software Deployment, providing cloud automation and agility in a self-hosted environment. It boosts productivity up to 30% by automating time-consuming and expensive administrative functions like backups, provisioning, and point-in-time recovery – enabling a [hybrid (DBaaS) experience](https://www.enterprisedb.com/use-case/hybrid-dbaas). Monitor, observe, and respond to issues in real-time with visibility into 200+ metrics, keeping databases secure and enabling up to 99.999% availability. Plus, with built-in query diagnostics, you can identify problems and bottlenecks up to 5x faster and accelerate application performance up to 8x.  + +See a demo of the Hybrid Control Plane in action! + +
+ + +### **Analytics Accelerator** + +_Unify transactional and analytical workloads in Postgres with lower cost, faster performance, and simpler operations._  + +Scaling analytics workloads is crucial for modern enterprises that deal with high volumes of data and demand for rapid insights. Running these analytics queries directly on transactional data requires teams to spend significant time on data management and can degrade operational performance and slow down time-to-insights. + +EDB’s [**Analytics Accelerator**](https://www.enterprisedb.com/products/analytics-accelerator) leverages lakehouse ecosystem integration and a Vectorized Query Engine so you can use SQL to query columnar data in external object storage. This allows you to run complex analytical queries across core business data with no lag on existing transactional workloads — 30x faster than standard Postgres. + +It also supports Tiered Tables functionality, ensuring optimal performance by automatically offloading cold data to columnar tables in object storage, reducing overall storage costs with 18x more cost efficiency and simplifying the process of managing analytics over multiple data tiers.  + +Watch a demo to see how to add an analytics node, sync data, and integrate with Databricks, improving insights without sacrificing performance. + +
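+To make that querying model concrete, here is a minimal sketch of the kind of analytical SQL this enables. The `sales_history` table and its columnar, object-storage backing are illustrative assumptions, not part of the product API; see the Analytics Accelerator documentation for the actual setup.
+
+```sql
+-- A minimal sketch, assuming sales_history has been offloaded to a
+-- columnar table in object storage (table name and schema are
+-- illustrative assumptions).
+SELECT product_id,
+       date_trunc('month', sold_at) AS month,
+       sum(amount) AS revenue
+FROM sales_history
+WHERE sold_at >= now() - interval '2 years'
+GROUP BY product_id, month
+ORDER BY revenue DESC
+LIMIT 10;
+```
+
+Because the offloaded table behaves like any other Postgres relation, existing reporting queries can run against it unchanged while the vectorized engine handles the columnar scan.
+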
+ + +### **EDB Data Migration Service (DMS) and Data Sync** + +_Accelerate seamless migrations to break free from legacy constraints and innovate faster._ + +Today, organizations want to break free from legacy systems to tackle next-gen application development, which requires diverse data models and open standards that integrate with modern data stacks. + +[Modernizing](https://www.enterprisedb.com/use-case/modernize-legacy-applications) from legacy systems to EDB Postgres AI unlocks rapid innovation and growth for enterprises enabling seamless migrations to enterprise-grade PostgreSQL. The [**EDB Data Migration Service (DMS)**](https://www.enterprisedb.com/docs/edb-postgres-ai/migration-etl/data-migration-service/) **and Data Sync** enable a secure and fault-tolerant way to migrate Oracle and Postgres data from on-premises and cloud environments into EDB Postgres AI. This enables organizations with strict security compliance and data privacy needs to utilize EDB’s migration capabilities in their own environments. **EDB's Oracle Estate Migration Assessments** also make it easier to get a quick understanding of the complexity and level of effort required to migrate their Oracle databases to Postgres. + +Learn more about [Oracle compatibility](https://www.enterprisedb.com/products/edb-postgres-advanced-server) enhancements and how EDB Postgres AI unlocks rapid innovation and growth for enterprises undergoing modernization of their legacy data infrastructure. + +## Generally Available Today – Enhanced AI and Transactional Workloads + +### **AI Accelerator**  + +_The fastest way to test and launch enterprise generative AI (GenAI) applications_ + +Postgres users can already use the open source pgvector extension for foundational vector data support. This is powerful on its own but still requires developers to do a lot of manual work to create data pipelines, select embedding models, and keep embeddings up to date to avoid data staleness.  + +The [**AI Accelerator**](https://www.enterprisedb.com/products/ai-accelerator) provides the fastest way to test and launch multi-modal enterprise GenAI applications with the powerful EDB Pipelines extension, which is preloaded with pgvector and advanced AI workflow functionality like managed pipelines and automatic embedding generation. This enables customers to get GenAI apps to market faster with out-of-the-box vector data capabilities, less custom code, lower maintenance, and fewer application integration efforts. Now, developers can build complex GenAI functionality using SQL commands in the familiar Postgres environment—with just 5 lines of code instead of 130+. + +You can also transform your Postgres database into a powerful GenAI semantic search engine that’s [4.22x faster](https://www.confident-ai.com/blog/why-we-replaced-pinecone-with-pgvector) than other purpose-built vector databases. Want to see this in real time? Check out this demo of a GenAI application that provides quick, accurate recommendations based on text or image searches. The AI Accelerator is generally available today – [get started here](https://enterprisedb.com/docs/purl/aidb/gettingstarted). + +
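+To make the “5 lines of SQL” claim concrete, here is a minimal sketch of what such a pipeline can look like. The function names and signatures below are illustrative assumptions, not the documented Pipelines API; consult the getting-started guide linked above for the real interface.
+
+```sql
+-- Illustrative sketch only: the aidb-style names and signatures below are
+-- assumptions, not the documented Pipelines API.
+
+-- 1. Register the embedding model the pipeline should use.
+SELECT aidb.create_model('embeddings', 'openai_embeddings');
+
+-- 2. Attach a retriever to an existing table, so embeddings for the chosen
+--    column are generated and kept up to date automatically.
+SELECT aidb.create_retriever_for_table(
+         name => 'product_search',
+         model_name => 'embeddings',
+         source_table => 'products',
+         source_data_column => 'description');
+
+-- 3. Semantic search then becomes a single query.
+SELECT * FROM aidb.retrieve_text('product_search', 'waterproof hiking boots', 5);
+```
+
+The point is less the exact names than the shape of the workflow: model registration, automatic embedding maintenance, and retrieval all happen inside Postgres, replacing the external pipeline code that would otherwise be needed to keep embeddings in sync.
+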
+
+### **EDB Postgres 17**
+
+_Use PostgreSQL to meet the demands of modern workloads_
+
+The recent [PostgreSQL 17 release](https://www.enterprisedb.com/news/edb-contributions-postgresqlr-17-help-enterprises-unlock-greater-performance-complex-workloads) equipped users with backup and recovery, JSON enhancements, and performance improvements to support modern database operations. EDB was a key contributor to these Postgres enhancements, and we’re excited to make these community features generally available across the EDB Postgres AI transactional database, tools, and extensions: [EDB Postgres Advanced Server](https://www.enterprisedb.com/products/edb-postgres-advanced-server) (EPAS) and [EDB Postgres Extended](https://www.enterprisedb.com/products/edb-postgres-extended) (PGE) Server.
+
+These releases are generally available and ready for download. Visit EDB docs for detailed [EPAS](https://www.enterprisedb.com/docs/epas/latest/) and [PGE](https://www.enterprisedb.com/docs/pge/latest/) release notes, or [check out this blog](https://www.enterprisedb.com/blog/edb-postgresr-17-transactional-database-highlights) for a recap of what’s new in EDB Postgres 17.
+
+### **EDB Software Bill of Materials**
+
+_Build with open source confidently and ensure security and Postgres compliance readiness_
+
+Enterprises today must ensure that customer data is protected and access to databases is controlled. While open source software (OSS) deployments can provide cost benefits, allow flexibility, and enable rapid innovation, they also introduce a challenge in identifying and mitigating potential security vulnerabilities. Today, the **EDB Software Bill of Materials (SBOM)** is available for the **EDB Postgres Advanced Server** and **EDB Postgres Distributed** software packages through the [**EDB Trust Center**](https://trust.enterprisedb.com/?itemName=continuous_monitoring&source=click). It offers visibility into your open source supply chain with a detailed inventory of the components and dependencies that comprise the software, including up-to-date license reporting.
+
+By making it easy to identify potential security vulnerabilities, the SBOM helps you ensure [secure open source software](https://www.enterprisedb.com/use-case/secure-oss), mitigate risk, and reduce your attack surface as you invest in open source. [Learn more about securing your open source software](https://www.enterprisedb.com/blog/edb-announces-secure-open-software-solution-edb-postgres-air-enterprise-and-government).
+
+### **That’s a wrap!**
+
+To learn more about the EDB Postgres AI Software Deployment, register for the preview experience [here](https://www.enterprisedb.com/preview). You can also zoom in even further on the AI and analytics launches with our [dedicated post](https://www.enterprisedb.com/blog/solving-enterprise-generative-ai-and-analytics-challenges-zooming-our-q4-2024-release) about them.
+ diff --git a/advocacy_docs/edb-postgres-ai/overview/latest-release-news/index.mdx b/advocacy_docs/edb-postgres-ai/overview/latest-release-news/index.mdx index f18a86349bc..c8960cd24d0 100644 --- a/advocacy_docs/edb-postgres-ai/overview/latest-release-news/index.mdx +++ b/advocacy_docs/edb-postgres-ai/overview/latest-release-news/index.mdx @@ -4,89 +4,105 @@ navTitle: Release News indexCards: simple iconName: Earth navigation: +- 2024q4release - 2024q3release - 2024q2release --- -** August 28, 2024 ** +Date: **December 10, 2024** -This [release roundup](https://www.enterprisedb.com/blog/release-radar-edb-postgresr-ai-q3-release-highlights) originally appeared on the EDB blog. +This [release roundup](https://www.enterprisedb.com/blog/solving-enterprise-generative-ai-and-analytics-challenges-zooming-our-q4-2024-release) originally appeared on the EDB blog. -There’s a lot of energy at EDB following the [Q2 announcement of EDB Postgres AI](2024q2release), an intelligent platform for unified management of transactional, analytical, and AI workloads, last quarter. The features we’re unveiling today build on last quarter’s EDB Postgres AI announcement by further enhancing the value delivered across our platform to support next gen applications, while further strengthening our core transactional database capabilities. +## Introducing the EDB Postgres AI Software Deployment: cloud agility, on your terms -Let’s take a closer look at these features. +### **Enable cloud agility and AI sovereignty for critical data infrastructure –  anywhere, any environment.** -## Trusted Postgres Architect release improves HA cluster deployment verification and PEM & EFM integration +Earlier today, we [announced](https://www.enterprisedb.com/news/edb-brings-cloud-agility-and-observability-hybrid-environments-sovereign-control) major updates to the [EDB Postgres AI](https://www.enterprisedb.com/products/edb-postgres-ai) sovereign data and AI platform. In the wake of data and AI becoming increasingly important to business innovation, our customers have asked us for more flexible solutions that offer both agility and control.  -With Trusted Postgres Architect (TPA), we continuously seek to create useful default configurations for our users to minimize the manual work required to tweak the cluster after deployment. We’ve done that and more with our latest features. +In response, we’ve launched a number of new generally available and preview capabilities to help accelerate deployment of EDB Postgres AI in [sovereign](https://www.enterprisedb.com/use-case/sovereign-ai), hybrid environments as an [omni-data platform](https://www.enterprisedb.com/use-case/omni-data-platform) that works across your enterprise’s data corpus to drive faster time to market for data-driven applications. With the new [EDB Postgres AI Software Deployment](https://www.enterprisedb.com/products/software-deployment), you can deploy, manage, scale, and observe mission critical data infrastructure in any self-managed, hybrid, or public cloud environment. -TPA Version 23.24 introduces clearer and more concise CLI output, helping operators verify that deployment was successful. Today’s release includes a new module that processes the raw Ansible output into a more concise form before streaming it to the operator, making it easier to spot important information. 
+The single container-driven software installation enables the consolidation of structured and unstructured data in a single multi-model data platform to accelerate transactional, analytical, and AI workloads. The Software Deployment unlocks a number of new capabilities: -For clusters that include EDB [Postgres Enterprise Manager](https://www.enterprisedb.com/products/postgres-enterprise-manager) (PEM), operators can now specify additional options to pass to the “register-agent” command, as well as provide their own SSL certificates to be used by the PEM web server. This enhancement improves upon earlier processes requiring the use of custom hooks or manual configuration. For [Enterprise Failover Manager](https://www.enterprisedb.com/docs/efm/latest/) (EFM) clusters, TPA now supports the new configuration parameters introduced with [EFM 4.9](https://www.enterprisedb.com/docs/efm/latest/efm_rel_notes/01_efm_49_rel_notes/). In addition, the configuration files created by TPA now include all the formatting and comments provided in the default EFM configuration files, making it easier for any operator wanting to manually inspect these files. TPA now allows users to specify whether TPA uses host names or IP addresses in EFM configuration, where previously only IP addresses were supported. +1. **Hybrid Control Plane**, enabling a hybrid database-as-a-service (DBaaS) with Kubernetes-driven automation and advanced observability across 200+ metrics to enable a cloud-like experience – even in your private data center. +2. **Analytics Accelerator**, which unlocks rapid analytics across unified business data in Postgres, powering 30x faster query performance and improving cost efficiency. +3. **AI Accelerator**, the fastest way to test and launch enterprise generative AI (GenAI) applications like chatbots and recommendation engines, so you can build cutting-edge GenAI functionality with just 5 lines of familiar SQL code (rather than 130+ using standard approaches). -Find out more about [Trusted Postgres Architect](https://www.enterprisedb.com/docs/tpa/latest/). If you’re new to TPA, try out this [TPA tutorial to spin up your first cluster](https://www.enterprisedb.com/docs/tpa/latest/firstclusterdeployment/). +To continue supporting our customers’ requirements as they evolve with their growing transactional workloads, we’ve also released enhancements to our **transactional database server software** and tooling, including **EDB Postgres 17** to meet the demands of modern workloads and the **EDB Software Bill of Materials**, offering visibility into your secure open source supply chain.  -## Postgres Enterprise Manager advances database and server monitoring +Today, these **transactional database enhancements** are **generally available**, along with the **AI Accelerator**. The **Hybrid Control Plane** and **Analytics Accelerator** are **now available for preview** through a [concierge demo experience](https://www.enterprisedb.com/engage). -Postgres Enterprise Manager provides tools to keep databases running smoothly, continuously monitoring database and server health with real-time graphical dashboards and automatic alerts. When issues are detected, PEM makes it easier to pinpoint and fix performance bottlenecks with integrated query profiling, performance, and log analysis tools. Highlights of Postgres Enterprise Manager Version 9.7.0 include the following: +## What’s in Preview? 
Unlock Cloud Scale And Rapid Analytics In Hybrid Environments  -- **EDB Postgres Distributed information in the “Core Usage” report:** This release enhances the PEM core usage report by adding information about EDB Postgres Distributed (PGD), giving customers a more complete view of their EDB license usage. As a result, the core usage report shows how many cores are running each version of Postgres across all PEM-monitored servers, including PGD.  -- **Copy notification settings:** This release extends the copy feature used by PEM to synchronize settings across multiple servers to now include notification settings, making it more powerful and reducing the need for additional manual steps or coding using the REST API.  -- **Dynamic probe scheduling to avoid showing outdated information:** PEM Agents collect data from monitored servers periodically, so changes can take a short time to reflect in the data. To mitigate this, PEM will now force some key agent tasks (known as probes) to run immediately after a server is added or changed in PEM, reducing the lag between adding or modifying a Postgres instance and the information being reflected in PEM monitoring data.  +### **Hybrid Control Plane** -To get the latest PEM and enjoy all these benefits, [update your PEM today](https://www.enterprisedb.com/docs/pem/latest/upgrading/upgrading_pem_installation/). +_Automation, single pane of glass management, and observability across hybrid data estates._ -## EDB Database Server Updates for PostgreSQL Community +Modern enterprises manage data across multiple clouds and on-premises deployments. The undifferentiated heavy lifting of database administration often distracts operators and engineers from more value-oriented work, like improving app scalability and accelerating time to market for data initiatives. While public cloud Database-as-a-Service (DBaaS) offerings provide automation of administrative tasks, they require tradeoffs on control, data sovereignty and deployment flexibility.  -As part of EDB’s support for the open source community’s quarterly release schedule, we completed new software releases in the EDB repositories of PostgreSQL, EDB Postgres Extended (PGE) Server, and EDB Postgres Advanced Server (EPAS), including the following:  +The **Hybrid Control Plane** is a **centralized management and automation solution** for the EDB Postgres AI Software Deployment, providing cloud automation and agility in a self-hosted environment. It boosts productivity up to 30% by automating time-consuming and expensive administrative functions like backups, provisioning, and point-in-time recovery – enabling a [hybrid (DBaaS) experience](https://www.enterprisedb.com/use-case/hybrid-dbaas). Monitor, observe, and respond to issues in real-time with visibility into 200+ metrics, keeping databases secure and enabling up to 99.999% availability. Plus, with built-in query diagnostics, you can identify problems and bottlenecks up to 5x faster and accelerate application performance up to 8x.  -| Database Distributions | Versions Released | -| ---------------------- | ----------------- | -| PostgreSQL | 16.4, 15.8, 14.13, 13.16 and 12.20 | -| EDB Postgres Extended Server | 16.4.1, 15.8.1, 14.13.1, 13.16 and 12.20 | -| EDB Postgres Advanced Server | 16.4.1, 15.8.1, 14.13.1, 13.16.22 and 12.20.25 | +See a demo of the Hybrid Control Plane in action! -The PostgreSQL minor releases were made available by the PostgreSQL Global Development Group on August 8th, addressing a security vulnerability. 
EDB repositories were simultaneously updated with new, minor releases of PostgreSQL, PGE, and EPAS, incorporating upstream fixes and additional feature enhancements. Complete [PGE release notes](https://www.enterprisedb.com/docs/pge/latest/release_notes/) and [EPAS release notes](https://www.enterprisedb.com/docs/epas/latest/epas_rel_notes/) are available. For details on the security fix and other improvements in PostgreSQL, please see [the Postgres release announcement](https://www.postgresql.org/about/news/postgresql-164-158-1413-1316-1220-and-17-beta-3-released-2910/). +
-## Seamlessly Convert Postgres Databases into a RESTful API with PostgREST Extension Support +### **Analytics Accelerator** -The popular open source tool [PostgREST](https://docs.postgrest.org/en/latest/) has been added to EDB’s supported [open source software list](https://www.enterprisedb.com/sites/default/files/pdf/edb_supported_open_source_software_20240515.pdf). This update unlocks more efficient and scalable Web services by enabling customers to seamlessly convert their Postgres database into a RESTful API. +_Unify transactional and analytical workloads in Postgres with lower cost, faster performance, and simpler operations._  -With EDB-supported software, customers can deploy with confidence knowing that these solutions are not only packaged by EDB but also come with the assurance of thorough EDB review and dedicated support for any issues encountered. +Scaling analytics workloads is crucial for modern enterprises that deal with high volumes of data and demand for rapid insights. Running these analytics queries directly on transactional data requires teams to spend significant time on data management and can degrade operational performance and slow down time-to-insights. -## New Migration Toolkit command line options ease migration criteria specification +EDB’s [**Analytics Accelerator**](https://www.enterprisedb.com/products/analytics-accelerator) leverages lakehouse ecosystem integration and a Vectorized Query Engine so you can use SQL to query columnar data in external object storage. This allows you to run complex analytical queries across core business data with no lag on existing transactional workloads — 30x faster than standard Postgres. -Operators have long relied on the [EDB Migration Toolkit](https://www.enterprisedb.com/products/migration-toolkit-move-oracle-postgresql) command-line tool to help migrate tables and data from legacy DBMS to PostgreSQL or [EDB Postgres Advanced Server](https://www.enterprisedb.com/products/edb-postgres-advanced-server). By adding the ability to enable command line options to be specified via a file for configuration, EDB Migration Toolkit reduces administration risks and increases the predictability of Oracle-to-Postgres migrations. +It also supports Tiered Tables functionality, ensuring optimal performance by automatically offloading cold data to columnar tables in object storage, reducing overall storage costs with 18x more cost efficiency and simplifying the process of managing analytics over multiple data tiers.  -The new version of the Migration Toolkit is available for download from the EDB repositories or the [EDB software downloads page](https://www.enterprisedb.com/software-downloads-postgres#migration-toolkit). +Watch a demo to see how to add an analytics node, sync data, and integrate with Databricks, improving insights without sacrificing performance. -For more information about the new command line options and other enhancements and fixes in this release, see the [Migration Toolkit documentation](https://www.enterprisedb.com/docs/migration_toolkit/latest/). +
-## EDB Query Advisor updates provide actionable index recommendations  -EDB customers can leverage [EDB Query Advisor](https://www.enterprisedb.com/docs/pg_extensions/query_advisor/) with PostgreSQL, PGE, and EPAS to get index recommendations based on the running database workload. This soon-to-be-released version builds on that capability by analyzing the gap between estimated and actual rows in query plans. With this insight, EDB Query Advisor provides actionable recommendations for extended table statistics, resulting in more accurate query plans, improved performance, and enhanced overall database efficiency. +### **EDB Data Migration Service (DMS) and Data Sync** -## EDB Postgres for Kubernetes 1.24 released +_Accelerate seamless migrations to break free from legacy constraints and innovate faster._ -[EDB Postgres for Kubernetes](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/) adds speed, efficiency, and protection for your k8s infrastructure modernization, with an enterprise-grade operator for Postgres. EDB Postgres for Kubernetes brings automation, security, and reliability to cloud-native data infrastructures. +Today, organizations want to break free from legacy systems to tackle next-gen application development, which requires diverse data models and open standards that integrate with modern data stacks. -Our new EDB Postgres for Kubernetes upstream release is merged with CloudNativePG 1.24.0.  +[Modernizing](https://www.enterprisedb.com/use-case/modernize-legacy-applications) from legacy systems to EDB Postgres AI unlocks rapid innovation and growth for enterprises enabling seamless migrations to enterprise-grade PostgreSQL. The [**EDB Data Migration Service (DMS)**](https://www.enterprisedb.com/docs/edb-postgres-ai/migration-etl/data-migration-service/) **and Data Sync** enable a secure and fault-tolerant way to migrate Oracle and Postgres data from on-premises and cloud environments into EDB Postgres AI. This enables organizations with strict security compliance and data privacy needs to utilize EDB’s migration capabilities in their own environments. **EDB's Oracle Estate Migration Assessments** also make it easier to get a quick understanding of the complexity and level of effort required to migrate their Oracle databases to Postgres. -For more CloudNativePG information, check out the [1.24 Release Notes](https://cloudnative-pg.io/documentation/1.24/release_notes/v1.24/). +Learn more about [Oracle compatibility](https://www.enterprisedb.com/products/edb-postgres-advanced-server) enhancements and how EDB Postgres AI unlocks rapid innovation and growth for enterprises undergoing modernization of their legacy data infrastructure. -## Barman 3.11 release leverages PostgreSQL 17 incremental backup and more +## Generally Available Today – Enhanced AI and Transactional Workloads -[Barman](https://pgbarman.org/) provides robust backup and recovery solutions for PostgreSQL. We recently detailed how the combination of Barman 3.11 and PostgreSQL 17 combine to deliver seamless, enterprise-grade backup strategies in this EDB blog: [_Why PostgreSQL 17's Incremental Backup Feature is a Game-Changer_](https://www.enterprisedb.com/blog/why-postgresql-17s-incremental-backup-feature-game-changer). +### **AI Accelerator**  -Learn more about enhanced Barman 3.11 features focused on saving resources and adding configuration options in the [Release Notes](https://github.com/EnterpriseDB/barman/blob/release/3.11.1/NEWS). 
+_The fastest way to test and launch enterprise generative AI (GenAI) applications_ -Take a closer look at [Barman on EDB Docs](https://www.enterprisedb.com/docs/supported-open-source/barman/). +Postgres users can already use the open source pgvector extension for foundational vector data support. This is powerful on its own but still requires developers to do a lot of manual work to create data pipelines, select embedding models, and keep embeddings up to date to avoid data staleness.  -## There’s more to come from EDB in Q3  +The [**AI Accelerator**](https://www.enterprisedb.com/products/ai-accelerator) provides the fastest way to test and launch multi-modal enterprise GenAI applications with the powerful EDB Pipelines extension, which is preloaded with pgvector and advanced AI workflow functionality like managed pipelines and automatic embedding generation. This enables customers to get GenAI apps to market faster with out-of-the-box vector data capabilities, less custom code, lower maintenance, and fewer application integration efforts. Now, developers can build complex GenAI functionality using SQL commands in the familiar Postgres environment—with just 5 lines of code instead of 130+. -We’re not done sharing news about our EDB Postgres AI enhancements. As a leading PostgreSQL community contributor, we already have [expert-level analysis](https://iw-resources.informationweek.com/free/w_defa6615/) into our favorite features in the upcoming PostgreSQL17 release. We’ll share news on how EDB database platforms build on this important community update as the September 15th release date arrives. Plus, stay tuned for new EDB announcements focused on our fully managed EDB Postgres AI Cloud Service and EDB Postgres Distributed database solutions.  +You can also transform your Postgres database into a powerful GenAI semantic search engine that’s [4.22x faster](https://www.confident-ai.com/blog/why-we-replaced-pinecone-with-pgvector) than other purpose-built vector databases. Want to see this in real time? Check out this demo of a GenAI application that provides quick, accurate recommendations based on text or image searches. The AI Accelerator is generally available today – [get started here](https://enterprisedb.com/docs/purl/aidb/gettingstarted). -For more information about our Q3 releases, [contact us today](https://www.enterprisedb.com/contact). +
+
+### **EDB Postgres 17**
+
+_Use PostgreSQL to meet the demands of modern workloads_
+
+The recent [PostgreSQL 17 release](https://www.enterprisedb.com/news/edb-contributions-postgresqlr-17-help-enterprises-unlock-greater-performance-complex-workloads) equipped users with backup and recovery, JSON enhancements, and performance improvements to support modern database operations. EDB was a key contributor to these Postgres enhancements, and we’re excited to make these community features generally available across the EDB Postgres AI transactional databases, tools, and extensions, including [EDB Postgres Advanced Server](https://www.enterprisedb.com/products/edb-postgres-advanced-server) (EPAS) and [EDB Postgres Extended](https://www.enterprisedb.com/products/edb-postgres-extended) (PGE) Server. 
+
+These releases are generally available and ready for download. Visit EDB docs for detailed [EPAS](https://www.enterprisedb.com/docs/epas/latest/) and [PGE](https://www.enterprisedb.com/docs/pge/latest/) Release Notes or [check out this blog](https://www.enterprisedb.com/blog/edb-postgresr-17-transactional-database-highlights) for a recap of what’s new in EDB Postgres 17. 
+
+### **EDB Software Bill of Materials**
+
+_Build with open source confidently and ensure security and Postgres compliance readiness_ 
+
+Enterprises today must ensure that customer data is protected and access to databases is controlled. While open source software (OSS) deployments can provide cost benefits, allow flexibility, and enable rapid innovation, they also introduce a challenge in identifying and mitigating potential security vulnerabilities. Today, the **EDB Software Bill of Materials (SBOM)** is available for **EDB Postgres Advanced Server** and **EDB Postgres Distributed** software packages through the [**EDB Trust Center**](https://trust.enterprisedb.com/?itemName=continuous_monitoring&source=click). It offers visibility into your open source supply chain with a detailed inventory of the components and dependencies that comprise the software, including up-to-date license reporting.
+
+By making it easier to identify potential security vulnerabilities, the SBOM helps you ensure [secure open source software](https://www.enterprisedb.com/use-case/secure-oss), mitigate risk, and reduce your attack surface as you invest in open source. [Learn more about securing your open source software](https://www.enterprisedb.com/blog/edb-announces-secure-open-software-solution-edb-postgres-air-enterprise-and-government).
+
+### **That’s a wrap!** 
+
+To learn more about the EDB Postgres AI Software Deployment, read more and register for the preview experience [here](https://www.enterprisedb.com/preview). You can also zoom in even further on the AI and Analytics launches with our [dedicated post](https://www.enterprisedb.com/blog/solving-enterprise-generative-ai-and-analytics-challenges-zooming-our-q4-2024-release) about them.
+ diff --git a/src/pages/index.js b/src/pages/index.js index 6daa9f16143..709d5691ba8 100644 --- a/src/pages/index.js +++ b/src/pages/index.js @@ -233,7 +233,7 @@ const Page = () => { to="/edb-postgres-ai/overview/latest-release-news" iconName={iconNames.SMALL_DASHBOARD} > - Release News - 24Q3 + Release News - 24Q4 Date: Thu, 12 Dec 2024 16:53:25 +0000 Subject: [PATCH 14/20] Updated news boxes Signed-off-by: Dj Walker-Morgan --- src/constants/updates.js | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/constants/updates.js b/src/constants/updates.js index d09d13fbf51..af8690f13ad 100644 --- a/src/constants/updates.js +++ b/src/constants/updates.js @@ -1,6 +1,22 @@ import IconNames from "../components/icon/iconNames"; export const updates = [ + { + title: "EDB Postgres AI Q4 Release News", + icon: IconNames.SMALL_DASHBOARD, + description: + "EDB Postgres AI's Q4 announcements include AI Accelerator and previews of EDB's Software Deployment and Analytics Accelerator.", + url: "/edb-postgres-ai/overview/latest-release-news/", + moreUrl: "/edb-postgres-ai/overview/latest-release-news/", + }, + { + title: "EDB Postgres Advanced Server 17.2", + icon: IconNames.EDB_EPAS, + description: + "EDB Postgres Advanced Server 17.2 is built on open-source PostgreSQL 17.2, which introduces myriad enhancements that enable databases to scale up and scale out in more efficient ways.", + url: "/epas/latest/", + moreUrl: "/epas/latest/epas_rel_notes/epas17_2_rel_notes/", + }, { title: "EDB CloudNativePG Global Cluster 1.0.1", icon: IconNames.KUBERNETES, From d0d6ac9bf420333676b44b6c99a97b89f42f9c77 Mon Sep 17 00:00:00 2001 From: gvasquezvargas Date: Tue, 10 Dec 2024 18:02:22 -0500 Subject: [PATCH 15/20] TDE: improvements and fixes for DOCS-1147 --- product_docs/docs/tde/15/enabling/enabling_tde.mdx | 14 +++++++------- .../docs/tde/15/enabling/enabling_tde_epas.mdx | 4 ++-- .../docs/tde/15/enabling/postgres_to_extended.mdx | 4 ++-- .../docs/tde/15/encrypted_files/wal_files.mdx | 4 ++-- product_docs/docs/tde/15/secure_key/index.mdx | 6 +++--- product_docs/docs/tde/15/secure_key/key_store.mdx | 4 ++-- product_docs/docs/tde/15/secure_key/passphrase.mdx | 6 +++--- 7 files changed, 21 insertions(+), 21 deletions(-) diff --git a/product_docs/docs/tde/15/enabling/enabling_tde.mdx b/product_docs/docs/tde/15/enabling/enabling_tde.mdx index af63e7559ab..2a9af91e38c 100644 --- a/product_docs/docs/tde/15/enabling/enabling_tde.mdx +++ b/product_docs/docs/tde/15/enabling/enabling_tde.mdx @@ -12,13 +12,13 @@ Create a new EDB Postgres Advanced Server cluster with TDE enabled. ## Worked example -This example uses EDB Postgres Advanced Server 15 running on a Linux platform. It uses OpenSSL to define the passphrase to wrap and unwrap the generated data encryption key. +This example uses EDB Postgres Advanced Server 16 running on a Linux platform. It uses OpenSSL to define the passphrase to wrap and unwrap the generated data encryption key. 1. Set the data encryption key (wrap) and decryption (unwrap) environment variables: ```shell - export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass: -out %p' - export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass: -in %p' + export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass: -out "%p"' + export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass: -in "%p"' ``` !!!note @@ -28,21 +28,21 @@ This example uses EDB Postgres Advanced Server 15 running on a Linux platform. I 1. 
Initialize the cluster using `initdb` with encryption enabled. This command sets the `data_encryption_key_unwrap_command` parameter in the `postgresql.conf` file. ```shell - /usr/edb/as15/bin/initdb --data-encryption -D /var/lib/edb/as15/data + /usr/edb/as16/bin/initdb --data-encryption -D /var/lib/edb/as16/data ``` 1. Start the cluster: ```shell - /usr/edb/as15/bin/pg_ctl -D /var/lib/edb/as15/data start + /usr/edb/as16/bin/pg_ctl -D /var/lib/edb/as16/data start ``` 1. Run grep on `postgresql.conf` to verify the setting of `data_encryption_key_unwrap_command`: ```shell - grep data_encryption_key_unwrap_command /var/lib/edb/as15/data/postgresql.conf + grep data_encryption_key_unwrap_command /var/lib/edb/as16/data/postgresql.conf __OUTPUT__ - data_encryption_key_unwrap_command = 'openssl enc -d -aes-128-cbc -pass pass: -in %p' + data_encryption_key_unwrap_command = 'openssl enc -d -aes-128-cbc -pass pass: -in "%p"' ``` 1. [Verify that data encryption is enabled](verifying_tde). diff --git a/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx b/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx index d05c9b123b7..58e45a22095 100644 --- a/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx +++ b/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx @@ -54,8 +54,8 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [ 1. Set environment variables to export the `wrap` and `unwrap` commands: ``` - export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass:ok -out %p' - export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass:ok -in %p' + export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass:ok -out "%p"' + export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass:ok -in "%p"' ``` !!!note diff --git a/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx b/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx index 4e9f507e6e4..86de2e5380b 100644 --- a/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx +++ b/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx @@ -54,8 +54,8 @@ This example upgrades a PostgreSQL 16 instance to EDB Postgres Extended Server 1 1. 
Set environment variables to export the `wrap` and `unwrap` commands:

    ```
-   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass:ok -out %p'
-   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass:ok -in %p'
+   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass:ok -out "%p"'
+   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass:ok -in "%p"'
    ```

    !!!note
diff --git a/product_docs/docs/tde/15/encrypted_files/wal_files.mdx b/product_docs/docs/tde/15/encrypted_files/wal_files.mdx
index 0a350983684..18814fe7dfe 100644
--- a/product_docs/docs/tde/15/encrypted_files/wal_files.mdx
+++ b/product_docs/docs/tde/15/encrypted_files/wal_files.mdx
@@ -41,7 +41,7 @@ Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before
 
 This example uses `pg_waldump` to display the WAL log of an encrypted cluster that uses `openssl` to wrap the data encryption key:
 
 ```
-pg_waldump --data-encryption --key-file-name=pg_encryption/key.bin --key-unwrap-command='openssl enc -d -aes-128-cbc -pass pass: -in %p'
+pg_waldump --data-encryption --key-file-name=pg_encryption/key.bin --key-unwrap-command='openssl enc -d -aes-128-cbc -pass pass: -in "%p"'
 ```
 
 ## Resetting a corrupt TDE-encrypted WAL file
@@ -59,5 +59,5 @@ Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before
 
 This example uses `pg_resetwal` to reset a corrupt encrypted WAL log of an encrypted cluster that uses `openssl` to wrap the data encryption key:
 
 ```
-pg_resetwal --key-unwrap-command='openssl enc -d -aes-128-cbc -pass pass: -in %p'
+pg_resetwal --key-unwrap-command='openssl enc -d -aes-128-cbc -pass pass: -in "%p"'
 ```
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/secure_key/index.mdx b/product_docs/docs/tde/15/secure_key/index.mdx
index 45ef27d8b51..10f3be904f4 100644
--- a/product_docs/docs/tde/15/secure_key/index.mdx
+++ b/product_docs/docs/tde/15/secure_key/index.mdx
@@ -48,8 +48,8 @@ You must make the commands available to the TDE database server so it can wrap a
Example ```shell - PGDATAKEYWRAPCMD='openssl enc -e -aes128-wrap -pbkdf2 -out "%p"' - PGDATAKEYUNWRAPCMD='openssl enc -d -aes128-wrap -pbkdf2 -in "%p"' + PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' + PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' export PGDATAKEYWRAPCMD PGDATAKEYUNWRAPCMD #After these variables are set, you can initialize the server: initdb --data-encryption -D /var/lib/edb/as16/data @@ -63,7 +63,7 @@ You must make the commands available to the TDE database server so it can wrap a
Example ```shell - initdb --data-encryption -D /var/lib/edb/as16/data --key-wrap-command='openssl enc -e -aes128-wrap -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes128-wrap -pbkdf2 -in "%p"' + initdb --data-encryption -D /var/lib/edb/as16/data --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' ```
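Since the wrap command in these examples reads the generated key on standard input and writes the wrapped file to `%p`, while the unwrap command reads `%p` and writes the recovered key to standard output, you can sanity-check a candidate command pair before handing it to `initdb`. A minimal sketch, assuming OpenSSL is installed; the scratch file names (`key.plain`, `key.wrapped`, `key.check`) are hypothetical:

```shell
# Round-trip a dummy 16-byte key through the wrap/unwrap pair shown above.
head -c 16 /dev/urandom > key.plain
openssl enc -e -aes-128-cbc -pbkdf2 -out key.wrapped < key.plain   # prompts for a passphrase
openssl enc -d -aes-128-cbc -pbkdf2 -in key.wrapped > key.check    # prompts for the same passphrase
cmp key.plain key.check && echo "wrap/unwrap round-trip OK"
rm -f key.plain key.wrapped key.check
```

If the comparison fails or `openssl` can't decrypt, the same failure would stop the database server from recovering its data encryption key at startup.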
diff --git a/product_docs/docs/tde/15/secure_key/key_store.mdx b/product_docs/docs/tde/15/secure_key/key_store.mdx
index 55396044979..bc27ad9b81f 100644
--- a/product_docs/docs/tde/15/secure_key/key_store.mdx
+++ b/product_docs/docs/tde/15/secure_key/key_store.mdx
@@ -83,8 +83,8 @@ vault write -f transit/keys/pg-tde-master-1
 
 Use the `vault write` command with the `pg-tde-master-1` key to wrap and unwrap the data encryption key:
 
 ```
-PGDATAKEYWRAPCMD='base64 | vault write -field=ciphertext transit/encrypt/pg-tde-master-1 plaintext=- > %p'
-PGDATAKEYUNWRAPCMD='vault write -field=plaintext transit/decrypt/pg-tde-master-1 ciphertext=- < %p | base64 -d'
+PGDATAKEYWRAPCMD='base64 | vault write -field=ciphertext transit/encrypt/pg-tde-master-1 plaintext=- > "%p"'
+PGDATAKEYUNWRAPCMD='vault write -field=plaintext transit/decrypt/pg-tde-master-1 ciphertext=- < "%p" | base64 -d'
 ```
 
 ## Thales CipherTrust Manager example
diff --git a/product_docs/docs/tde/15/secure_key/passphrase.mdx b/product_docs/docs/tde/15/secure_key/passphrase.mdx
index f09ac41f70f..4c1d2090098 100644
--- a/product_docs/docs/tde/15/secure_key/passphrase.mdx
+++ b/product_docs/docs/tde/15/secure_key/passphrase.mdx
@@ -6,7 +6,7 @@ description: Learn how to secure your encryption key with a passphrase.
 
 You can protect the data key with a passphrase using the OpenSSL command line utility. The following is an example that sets up this protection:
 
 ```shell
-initdb -D datadir -y --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"'
+initdb -D datadir --data-encryption --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"'
 ```
 
 This example wraps the randomly generated data key (done internally by initdb) by encrypting it with the AES-128-CBC algorithm. The encryption uses a key derived from a passphrase with the PBKDF2 key derivation function and a randomly generated salt. The terminal prompts for the passphrase. (See the openssl-enc manual page for details about these options. Available options vary across versions.) The initdb utility replaces `%p` with the name of the file that stores the wrapped key.
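If you ever need to confirm that a stored key still unwraps with the expected passphrase (for example, before a planned restore), you can run the unwrap command by hand against the key file. A hedged sketch; it assumes `$PGDATA` points at the data directory and that the wrapped key lives at `pg_encryption/key.bin`, the same path the `pg_waldump` example uses:

```shell
# Manually unwrap the stored data key; count the bytes instead of printing raw key material.
openssl enc -d -aes-128-cbc -pbkdf2 -in "$PGDATA/pg_encryption/key.bin" | wc -c
# A successful unwrap reports the key length (16 bytes for a 128-bit key).
# With the wrong passphrase, openssl typically exits with a "bad decrypt" error instead.
```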
@@ -30,8 +30,8 @@ Key unwrap commands that prompt for passwords on the terminal don't work when th
 
 For example, for systemd, you can use `systemd-ask-password`:
 
 ```
-PGDATAKEYWRAPCMD="bash -c 'openssl enc -e -aes-128-cbc -pbkdf2 -out %p -pass file:<(sudo systemd-ask-password --no-tty)'"
-PGDATAKEYUNWRAPCMD="bash -c 'openssl enc -d -aes-128-cbc -pbkdf2 -in %p -pass file:<(sudo systemd-ask-password --no-tty)'"
+PGDATAKEYWRAPCMD="bash -c 'openssl enc -e -aes-128-cbc -pbkdf2 -out \"%p\" -pass file:<(sudo systemd-ask-password --no-tty)'"
+PGDATAKEYUNWRAPCMD="bash -c 'openssl enc -d -aes-128-cbc -pbkdf2 -in \"%p\" -pass file:<(sudo systemd-ask-password --no-tty)'"
 ```
 
 You also need an entry like the following in `/etc/sudoers`:

From 4f78800eae0f34274ac2f9b7c4512a0cadd0e1fc Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Wed, 11 Dec 2024 10:56:22 -0500
Subject: [PATCH 16/20] Implemented fixes and improvements

---
 product_docs/docs/tde/15/enabling/enabling_tde.mdx |  4 ++--
 .../docs/tde/15/enabling/enabling_tde_epas.mdx     |  4 ++--
 .../docs/tde/15/enabling/postgres_to_extended.mdx  |  4 ++--
 .../docs/tde/15/encrypted_files/wal_files.mdx      |  4 ++--
 product_docs/docs/tde/15/initdb_tde_options.mdx    |  4 +++-
 .../docs/tde/15/secure_key/disabling_key.mdx       | 12 ++++++++++--
 6 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/product_docs/docs/tde/15/enabling/enabling_tde.mdx b/product_docs/docs/tde/15/enabling/enabling_tde.mdx
index 2a9af91e38c..85b62fb2b9f 100644
--- a/product_docs/docs/tde/15/enabling/enabling_tde.mdx
+++ b/product_docs/docs/tde/15/enabling/enabling_tde.mdx
@@ -17,8 +17,8 @@ This example uses EDB Postgres Advanced Server 16 running on a Linux platform. I
 1. Set the data encryption key (wrap) and decryption (unwrap) environment variables:
 
    ```shell
-   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass: -out "%p"'
-   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass: -in "%p"'
+   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pbkdf2 -pass pass: -out "%p"'
+   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pbkdf2 -pass pass: -in "%p"'
   ```
 
 !!!note
diff --git a/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx b/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx
index 58e45a22095..995d5c030e5 100644
--- a/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx
+++ b/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx
@@ -54,8 +54,8 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [
 1. Set environment variables to export the `wrap` and `unwrap` commands:
 
    ```
-   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass:ok -out "%p"'
-   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass:ok -in "%p"'
+   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pbkdf2 -pass pass:ok -out "%p"'
+   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pbkdf2 -pass pass:ok -in "%p"'
    ```
 
    !!!note
diff --git a/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx b/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx
index 86de2e5380b..1d6fe963c2f 100644
--- a/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx
+++ b/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx
@@ -54,8 +54,8 @@ This example upgrades a PostgreSQL 16 instance to EDB Postgres Extended Server 1
 1. 
Set environment variables to export the `wrap` and `unwrap` commands:

    ```
-   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass:ok -out "%p"'
-   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass:ok -in "%p"'
+   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pbkdf2 -pass pass:ok -out "%p"'
+   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pbkdf2 -pass pass:ok -in "%p"'
    ```

    !!!note
diff --git a/product_docs/docs/tde/15/encrypted_files/wal_files.mdx b/product_docs/docs/tde/15/encrypted_files/wal_files.mdx
index 18814fe7dfe..1eb765126d0 100644
--- a/product_docs/docs/tde/15/encrypted_files/wal_files.mdx
+++ b/product_docs/docs/tde/15/encrypted_files/wal_files.mdx
@@ -41,7 +41,7 @@ Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before
 
 This example uses `pg_waldump` to display the WAL log of an encrypted cluster that uses `openssl` to wrap the data encryption key:
 
 ```
-pg_waldump --data-encryption --key-file-name=pg_encryption/key.bin --key-unwrap-command='openssl enc -d -aes-128-cbc -pass pass: -in "%p"'
+pg_waldump --data-encryption --key-file-name=pg_encryption/key.bin --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -pass pass: -in "%p"'
 ```
 
 ## Resetting a corrupt TDE-encrypted WAL file
@@ -59,5 +59,5 @@ Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before
 
 This example uses `pg_resetwal` to reset a corrupt encrypted WAL log of an encrypted cluster that uses `openssl` to wrap the data encryption key:
 
 ```
-pg_resetwal --key-unwrap-command='openssl enc -d -aes-128-cbc -pass pass: -in "%p"'
+pg_resetwal --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -pass pass: -in "%p"'
 ```
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/initdb_tde_options.mdx b/product_docs/docs/tde/15/initdb_tde_options.mdx
index 9b0387b0f1f..77e42b2ce09 100644
--- a/product_docs/docs/tde/15/initdb_tde_options.mdx
+++ b/product_docs/docs/tde/15/initdb_tde_options.mdx
@@ -11,7 +11,9 @@ Adds transparent data encryption when initializing a database server.
 
 ### Supported values
 
-You can optionally specify an AES key length. Valid values are 128 and 256. The default is 128.
+You can optionally specify an AES key length in the form `--data-encryption[=KEYLEN]`.
+
+Valid values are 128 and 256. The default is 128.
 
 ## Option: `--key-wrap-command=`
 
diff --git a/product_docs/docs/tde/15/secure_key/disabling_key.mdx b/product_docs/docs/tde/15/secure_key/disabling_key.mdx
index bdf8edb7e19..bdeb1ea7d60 100644
--- a/product_docs/docs/tde/15/secure_key/disabling_key.mdx
+++ b/product_docs/docs/tde/15/secure_key/disabling_key.mdx
@@ -4,6 +4,14 @@ description: Learn how to omit using a wrapping key.
 deepToc: true
 ---
 
-If you don't want key wrapping, for example for testing, then you must set the wrap and unwrap commands to the special value `-`.
+If you don't want key wrapping, for example for testing purposes, you have two options:
 
-This setting specifies to use the key from the file without further processing. This approach differs from not setting a wrap or unwrap command at all and from setting either or both to an empty string. Having no wrap or unwrap command set when TDE is used leaves your data encryption key unsecured and results in a fatal error when running an affected utility program.
+- You can set the wrap and unwrap commands to the special value `-` when initializing the cluster with `initdb`. For example, with the flags `--key-wrap-command=-` and `--key-unwrap-command=-`. 
+
+    With this configuration TDE generates encryption key files, but leaves them unprotected.
+
+- You can disable key wrapping when initializing the cluster with `initdb` by adding the flag `--no-key-wrap`.
+
+    With this configuration TDE generates encryption key files, but leaves them unprotected.
+
+For `initdb --data-encryption` to run successfully, you have to either specify a wrapping/unwrapping command, set a fallback environment variable with wrapping/unwrapping commands, or disable key wrapping with one of the previous mechanisms. Otherwise, the database cluster will fail.

From 01b229335855865a9fb57ebaf52fa00fea82d29b Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Thu, 12 Dec 2024 12:04:47 -0500
Subject: [PATCH 17/20] fixed disabling wrapping key page

---
 product_docs/docs/tde/15/secure_key/disabling_key.mdx | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/product_docs/docs/tde/15/secure_key/disabling_key.mdx b/product_docs/docs/tde/15/secure_key/disabling_key.mdx
index bdeb1ea7d60..474231f6328 100644
--- a/product_docs/docs/tde/15/secure_key/disabling_key.mdx
+++ b/product_docs/docs/tde/15/secure_key/disabling_key.mdx
@@ -4,14 +4,12 @@ description: Learn how to omit using a wrapping key.
 deepToc: true
 ---
 
-If you don't want key wrapping, for example for testing purposes, you have two options:
+If you don't want key wrapping, for example for testing purposes, you can use either of the following options to disable key wrapping:
 
 - You can set the wrap and unwrap commands to the special value `-` when initializing the cluster with `initdb`. For example, with the flags `--key-wrap-command=-` and `--key-unwrap-command=-`.
 
-    With this configuration TDE generates encryption key files, but leaves them unprotected.
+- Or you can disable key wrapping when initializing the cluster with `initdb` by adding the flag `--no-key-wrap`.
 
-- You can disable key wrapping when initializing the cluster with `initdb` by adding the flag `--no-key-wrap`.
+With either of these configurations, TDE generates encryption key files, but leaves them unprotected.
 
-    With this configuration TDE generates encryption key files, but leaves them unprotected.
-
-For `initdb --data-encryption` to run successfully, you have to either specify a wrapping/unwrapping command, set a fallback environment variable with wrapping/unwrapping commands, or disable key wrapping with one of the previous mechanisms. Otherwise, the database cluster will fail.
+For `initdb --data-encryption` to run successfully, you have to either specify a wrapping/unwrapping command, set a fallback environment variable with wrapping/unwrapping commands, or disable key wrapping with one of the previous mechanisms. Otherwise, the creation of an encrypted database cluster will fail.

From 14ebf5d344c22953781004bd994d3e3e7d21c422 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Thu, 12 Dec 2024 12:23:52 -0500
Subject: [PATCH 18/20] manually wrapped command with wrapping issue in PDF format

---
 product_docs/docs/tde/15/secure_key/passphrase.mdx | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/product_docs/docs/tde/15/secure_key/passphrase.mdx b/product_docs/docs/tde/15/secure_key/passphrase.mdx
index 4c1d2090098..5eda1eea0b6 100644
--- a/product_docs/docs/tde/15/secure_key/passphrase.mdx
+++ b/product_docs/docs/tde/15/secure_key/passphrase.mdx
@@ -6,7 +6,9 @@ description: Learn how to secure your encryption key with a passphrase.
You can protect the data key with a passphrase using the OpenSSL command line utility. The following is an example that sets up this protection: ```shell -initdb -D datadir --data-encryption --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' +initdb -D datadir --data-encryption \ + --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' \ + --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' ``` This example wraps the randomly generated data key (done internally by initdb) by encrypting it with the AES-128-CBC (AESKW) algorithm. The encryption uses a key derived from a passphrase with the PBKDF2 key derivation function and a randomly generated salt. The terminal prompts for the passphrase. (See the openssl-enc manual page for details about these options. Available options vary across versions.) The initdb utility replaces `%p` with the name of the file that stores the wrapped key. From f0f96019d2dceca2ba2d1cf57372afbf6a69b90c Mon Sep 17 00:00:00 2001 From: gvasquezvargas Date: Thu, 12 Dec 2024 12:39:57 -0500 Subject: [PATCH 19/20] Manually wrapped long commands --- product_docs/docs/tde/15/encrypted_files/wal_files.mdx | 3 ++- product_docs/docs/tde/15/secure_key/index.mdx | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/product_docs/docs/tde/15/encrypted_files/wal_files.mdx b/product_docs/docs/tde/15/encrypted_files/wal_files.mdx index 1eb765126d0..e9f9cc586bf 100644 --- a/product_docs/docs/tde/15/encrypted_files/wal_files.mdx +++ b/product_docs/docs/tde/15/encrypted_files/wal_files.mdx @@ -41,7 +41,8 @@ Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before This example uses `pg_waldump` to display the WAL log of an encrypted cluster that uses `openssl` to wrap the data encryption key: ``` -pg_waldump --data-encryption --key-file-name=pg_encryption/key.bin --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -pass pass: -in "%p"' +pg_waldump --data-encryption --key-file-name=pg_encryption/key.bin \ + --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -pass pass: -in "%p"' ``` ## Resetting a corrupt TDE-encrypted WAL file diff --git a/product_docs/docs/tde/15/secure_key/index.mdx b/product_docs/docs/tde/15/secure_key/index.mdx index 10f3be904f4..a57a37991bf 100644 --- a/product_docs/docs/tde/15/secure_key/index.mdx +++ b/product_docs/docs/tde/15/secure_key/index.mdx @@ -63,7 +63,9 @@ You must make the commands available to the TDE database server so it can wrap a
Example ```shell - initdb --data-encryption -D /var/lib/edb/as16/data --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' + initdb --data-encryption -D /var/lib/edb/as16/data \ + --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' \ + --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' ```
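After initialization, the unwrap command is recorded in the cluster's `postgresql.conf` as `data_encryption_key_unwrap_command`, so the server can recover the key at every startup. You can confirm what was stored with the same check used in the worked example earlier in this series:

```shell
# Verify the unwrap command that initdb persisted for this cluster.
grep data_encryption_key_unwrap_command /var/lib/edb/as16/data/postgresql.conf
# Expected output:
# data_encryption_key_unwrap_command = 'openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"'
```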
From 4f48893e95dad36718e156c0be7705617245c8f7 Mon Sep 17 00:00:00 2001 From: gvasquezvargas Date: Thu, 12 Dec 2024 12:45:40 -0500 Subject: [PATCH 20/20] Further command wrapping --- product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx | 6 ++++-- product_docs/docs/tde/15/enabling/postgres_to_extended.mdx | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx b/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx index 995d5c030e5..5819920064e 100644 --- a/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx +++ b/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx @@ -109,7 +109,8 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [ Include the `--copy-by-block` option. ``` - /usr/lib/edb-as/16/bin/pg_upgrade -b /usr/lib/edb-as/16/bin -B /usr/lib/edb-as/16/bin -d /var/lib/edb-as/16/non-TDE -D /var/lib/edb-as/16/TDE --copy-by-block --check + /usr/lib/edb-as/16/bin/pg_upgrade -b /usr/lib/edb-as/16/bin -B /usr/lib/edb-as/16/bin \ + -d /var/lib/edb-as/16/non-TDE -D /var/lib/edb-as/16/TDE --copy-by-block --check ``` !!!note @@ -118,7 +119,8 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [ 1. To copy data from the source server to the target server, run the `pg_upgrade` command in normal mode: ``` - /usr/lib/edb-as/16/bin/pg_upgrade -b /usr/lib/edb-as/16/bin -B /usr/lib/edb-as/16/bin -d /var/lib/edb-as/16/non-TDE -D /var/lib/edb-as/16/TDE --copy-by-block + /usr/lib/edb-as/16/bin/pg_upgrade -b /usr/lib/edb-as/16/bin -B /usr/lib/edb-as/16/bin \ + -d /var/lib/edb-as/16/non-TDE -D /var/lib/edb-as/16/TDE --copy-by-block ``` 1. Restart the encrypted server: diff --git a/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx b/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx index 1d6fe963c2f..ca82b8c960a 100644 --- a/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx +++ b/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx @@ -109,7 +109,8 @@ This example upgrades a PostgreSQL 16 instance to EDB Postgres Extended Server 1 Include the `--copy-by-block` option. ``` - /usr/lib/edb-pge/16/bin/pg_upgrade -b /usr/lib/postgresql/16/bin -B /usr/lib/edb-pge/16/bin -d /var/lib/postgresql/16/non-TDE -D /var/lib/edb-pge/16/TDE --copy-by-block --check + /usr/lib/edb-pge/16/bin/pg_upgrade -b /usr/lib/postgresql/16/bin -B /usr/lib/edb-pge/16/bin \ + -d /var/lib/postgresql/16/non-TDE -D /var/lib/edb-pge/16/TDE --copy-by-block --check ``` !!!note @@ -118,7 +119,8 @@ This example upgrades a PostgreSQL 16 instance to EDB Postgres Extended Server 1 1. To copy data from the source server to the target server, run the `pg_upgrade` command in normal mode: ``` - /usr/lib/edb-pge/16/bin/pg_upgrade -b /usr/lib/postgresql/16/bin -B /usr/lib/edb-pge/16/bin -d /var/lib/postgresql/16/non-TDE -D /var/lib/edb-pge/16/TDE --copy-by-block + /usr/lib/edb-pge/16/bin/pg_upgrade -b /usr/lib/postgresql/16/bin -B /usr/lib/edb-pge/16/bin \ + -d /var/lib/postgresql/16/non-TDE -D /var/lib/edb-pge/16/TDE --copy-by-block ``` 1. Restart the encrypted server: